From 01ff5c2eae9fc75beab3bc1368e17a7278dc0905 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Sat, 7 Apr 2018 15:03:26 -0600
Subject: tests: fix ec2 integration network metadata validation

Fix integration test logic for ec2 to look for network and
availability-zone data under the key path 'ds'=>'meta-data' instead of
just 'ds' when parsing instance-data.json.
---
 tests/cloud_tests/testcases/base.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'tests')

diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 324c7c91..7598d469 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -149,7 +149,10 @@ class CloudTestCase(unittest.TestCase):
         self.assertEqual(
             ['ds/user-data'], instance_data['base64-encoded-keys'])
         ds = instance_data.get('ds', {})
-        macs = ds.get('network', {}).get('interfaces', {}).get('macs', {})
+        v1_data = instance_data.get('v1', {})
+        metadata = ds.get('meta-data', {})
+        macs = metadata.get(
+            'network', {}).get('interfaces', {}).get('macs', {})
         if not macs:
             raise AssertionError('No network data from EC2 meta-data')
         # Check meta-data items we depend on
@@ -160,10 +163,8 @@
         for key in expected_net_keys:
             self.assertIn(key, mac_data)
         self.assertIsNotNone(
-            ds.get('placement', {}).get('availability-zone'),
+            metadata.get('placement', {}).get('availability-zone'),
             'Could not determine EC2 Availability zone placement')
-        ds = instance_data.get('ds', {})
-        v1_data = instance_data.get('v1', {})
         self.assertIsNotNone(
             v1_data['availability-zone'], 'expected ec2 availability-zone')
         self.assertEqual('aws', v1_data['cloud-name'])
--
cgit v1.2.3


From c6dff581a9c253170d5e3f12fb83d16a8dec8257 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Thu, 12 Apr 2018 15:32:25 -0400
Subject: Implement ntp client spec with auto support for distro selection

Add a base NTP client configuration dictionary and allow distro-specific
changes to be merged in. Add a client selection function which prefers
already-installed clients over clients which still need to be installed.
Also allow distributions to override the cloud-init defaults.
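As an illustrative snippet (it mirrors the cloud-config schema examples
added below), a user can pin a specific client, while leaving ntp_client
unset or set to 'auto' lets the distro pick its preferred installed
client:

    #cloud-config
    ntp:
      enabled: true
      ntp_client: chrony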
LP: #1749722 --- cloudinit/config/cc_ntp.py | 485 ++++++++++-- cloudinit/distros/__init__.py | 12 + cloudinit/distros/opensuse.py | 24 + cloudinit/distros/ubuntu.py | 19 + config/cloud.cfg.tmpl | 2 + templates/chrony.conf.debian.tmpl | 39 + templates/chrony.conf.fedora.tmpl | 48 ++ templates/chrony.conf.opensuse.tmpl | 38 + templates/chrony.conf.rhel.tmpl | 45 ++ templates/chrony.conf.sles.tmpl | 38 + templates/chrony.conf.ubuntu.tmpl | 42 + tests/cloud_tests/testcases/modules/ntp.yaml | 1 + tests/cloud_tests/testcases/modules/ntp_chrony.py | 15 + .../cloud_tests/testcases/modules/ntp_chrony.yaml | 17 + tests/cloud_tests/testcases/modules/ntp_pools.yaml | 1 + .../cloud_tests/testcases/modules/ntp_servers.yaml | 1 + .../cloud_tests/testcases/modules/ntp_timesyncd.py | 15 + .../testcases/modules/ntp_timesyncd.yaml | 15 + tests/unittests/test_distros/test_netconfig.py | 6 + .../test_distros/test_user_data_normalize.py | 6 + tests/unittests/test_handler/test_handler_ntp.py | 881 ++++++++++++++------- 21 files changed, 1369 insertions(+), 381 deletions(-) create mode 100644 templates/chrony.conf.debian.tmpl create mode 100644 templates/chrony.conf.fedora.tmpl create mode 100644 templates/chrony.conf.opensuse.tmpl create mode 100644 templates/chrony.conf.rhel.tmpl create mode 100644 templates/chrony.conf.sles.tmpl create mode 100644 templates/chrony.conf.ubuntu.tmpl create mode 100644 tests/cloud_tests/testcases/modules/ntp_chrony.py create mode 100644 tests/cloud_tests/testcases/modules/ntp_chrony.yaml create mode 100644 tests/cloud_tests/testcases/modules/ntp_timesyncd.py create mode 100644 tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml (limited to 'tests') diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index cbd0237d..9e074bda 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -10,20 +10,95 @@ from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE +from cloudinit import temp_utils from cloudinit import templater from cloudinit import type_utils from cloudinit import util +import copy import os +import six from textwrap import dedent LOG = logging.getLogger(__name__) frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' -TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf' NR_POOL_SERVERS = 4 -distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu'] +distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu'] + +NTP_CLIENT_CONFIG = { + 'chrony': { + 'check_exe': 'chronyd', + 'confpath': '/etc/chrony.conf', + 'packages': ['chrony'], + 'service_name': 'chrony', + 'template_name': 'chrony.conf.{distro}', + 'template': None, + }, + 'ntp': { + 'check_exe': 'ntpd', + 'confpath': NTP_CONF, + 'packages': ['ntp'], + 'service_name': 'ntp', + 'template_name': 'ntp.conf.{distro}', + 'template': None, + }, + 'ntpdate': { + 'check_exe': 'ntpdate', + 'confpath': NTP_CONF, + 'packages': ['ntpdate'], + 'service_name': 'ntpdate', + 'template_name': 'ntp.conf.{distro}', + 'template': None, + }, + 'systemd-timesyncd': { + 'check_exe': '/lib/systemd/systemd-timesyncd', + 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf', + 'packages': [], + 'service_name': 'systemd-timesyncd', + 'template_name': 'timesyncd.conf', + 'template': None, + }, +} + +# This is Distro-specific configuration overrides of the base config +DISTRO_CLIENT_CONFIG = { + 'debian': { + 'chrony': { + 'confpath': 
'/etc/chrony/chrony.conf', + }, + }, + 'opensuse': { + 'chrony': { + 'service_name': 'chronyd', + }, + 'ntp': { + 'confpath': '/etc/ntp.conf', + 'service_name': 'ntpd', + }, + 'systemd-timesyncd': { + 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + }, + }, + 'sles': { + 'chrony': { + 'service_name': 'chronyd', + }, + 'ntp': { + 'confpath': '/etc/ntp.conf', + 'service_name': 'ntpd', + }, + 'systemd-timesyncd': { + 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + }, + }, + 'ubuntu': { + 'chrony': { + 'confpath': '/etc/chrony/chrony.conf', + }, + }, +} # The schema definition for each cloud-config module is a strict contract for @@ -48,7 +123,34 @@ schema = { 'distros': distros, 'examples': [ dedent("""\ + # Override ntp with chrony configuration on Ubuntu + ntp: + enabled: true + ntp_client: chrony # Uses cloud-init default chrony configuration + """), + dedent("""\ + # Provide a custom ntp client configuration ntp: + enabled: true + ntp_client: myntpclient + config: + confpath: /etc/myntpclient/myntpclient.conf + check_exe: myntpclientd + packages: + - myntpclient + service_name: myntpclient + template: | + ## template:jinja + # My NTP Client config + {% if pools -%}# pools{% endif %} + {% for pool in pools -%} + pool {{pool}} iburst + {% endfor %} + {%- if servers %}# servers + {% endif %} + {% for server in servers -%} + server {{server}} iburst + {% endfor %} pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org] servers: - ntp.server.local @@ -83,79 +185,159 @@ schema = { List of ntp servers. If both pools and servers are empty, 4 default pool servers will be provided with the format ``{0-3}.{distro}.pool.ntp.org``.""") - } + }, + 'ntp_client': { + 'type': 'string', + 'default': 'auto', + 'description': dedent("""\ + Name of an NTP client to use to configure system NTP. + When unprovided or 'auto' the default client preferred + by the distribution will be used. The following + built-in client names can be used to override existing + configuration defaults: chrony, ntp, ntpdate, + systemd-timesyncd."""), + }, + 'enabled': { + 'type': 'boolean', + 'default': True, + 'description': dedent("""\ + Attempt to enable ntp clients if set to True. If set + to False, ntp client will not be configured or + installed"""), + }, + 'config': { + 'description': dedent("""\ + Configuration settings or overrides for the + ``ntp_client`` specified."""), + 'type': ['object'], + 'properties': { + 'confpath': { + 'type': 'string', + 'description': dedent("""\ + The path to where the ``ntp_client`` + configuration is written."""), + }, + 'check_exe': { + 'type': 'string', + 'description': dedent("""\ + The executable name for the ``ntp_client``. + For example, ntp service ``check_exe`` is + 'ntpd' because it runs the ntpd binary."""), + }, + 'packages': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + 'uniqueItems': True, + 'description': dedent("""\ + List of packages needed to be installed for the + selected ``ntp_client``."""), + }, + 'service_name': { + 'type': 'string', + 'description': dedent("""\ + The systemd or sysvinit service name used to + start and stop the ``ntp_client`` + service."""), + }, + 'template': { + 'type': 'string', + 'description': dedent("""\ + Inline template allowing users to define their + own ``ntp_client`` configuration template. + The value must start with '## template:jinja' + to enable use of templating support. + """), + }, + }, + # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override + # of builtin client values. 
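+                    # Required keys are still checked, but only after distro
+                    # defaults and user config have been merged, in
+                    # supplemental_schema_validation() below.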
+ 'required': [], + 'minProperties': 1, # If we have config, define something + 'additionalProperties': False + }, }, 'required': [], 'additionalProperties': False } } } - -__doc__ = get_schema_doc(schema) # Supplement python help() +REQUIRED_NTP_CONFIG_KEYS = frozenset([ + 'check_exe', 'confpath', 'packages', 'service_name']) -def handle(name, cfg, cloud, log, _args): - """Enable and configure ntp.""" - if 'ntp' not in cfg: - LOG.debug( - "Skipping module named %s, not present or disabled by cfg", name) - return - ntp_cfg = cfg['ntp'] - if ntp_cfg is None: - ntp_cfg = {} # Allow empty config which will install the package +__doc__ = get_schema_doc(schema) # Supplement python help() - # TODO drop this when validate_cloudconfig_schema is strict=True - if not isinstance(ntp_cfg, (dict)): - raise RuntimeError( - "'ntp' key existed in config, but not a dictionary type," - " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) - validate_cloudconfig_schema(cfg, schema) - if ntp_installable(): - service_name = 'ntp' - confpath = NTP_CONF - template_name = None - packages = ['ntp'] - check_exe = 'ntpd' - else: - service_name = 'systemd-timesyncd' - confpath = TIMESYNCD_CONF - template_name = 'timesyncd.conf' - packages = [] - check_exe = '/lib/systemd/systemd-timesyncd' - - rename_ntp_conf() - # ensure when ntp is installed it has a configuration file - # to use instead of starting up with packaged defaults - write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name) - install_ntp(cloud.distro.install_packages, packages=packages, - check_exe=check_exe) +def distro_ntp_client_configs(distro): + """Construct a distro-specific ntp client config dictionary by merging + distro specific changes into base config. - try: - reload_ntp(service_name, systemd=cloud.distro.uses_systemd()) - except util.ProcessExecutionError as e: - LOG.exception("Failed to reload/start ntp service: %s", e) - raise + @param distro: String providing the distro class name. + @returns: Dict of distro configurations for ntp clients. + """ + dcfg = DISTRO_CLIENT_CONFIG + cfg = copy.copy(NTP_CLIENT_CONFIG) + if distro in dcfg: + cfg = util.mergemanydict([cfg, dcfg[distro]], reverse=True) + return cfg -def ntp_installable(): - """Check if we can install ntp package +def select_ntp_client(ntp_client, distro): + """Determine which ntp client is to be used, consulting the distro + for its preference. - Ubuntu-Core systems do not have an ntp package available, so - we always return False. Other systems require package managers to install - the ntp package If we fail to find one of the package managers, then we - cannot install ntp. + @param ntp_client: String name of the ntp client to use. + @param distro: Distro class instance. + @returns: Dict of the selected ntp client or {} if none selected. 
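+
+    Precedence, per the logic below: an explicit user-provided ntp_client
+    wins; otherwise the distro's configured ntp_client is consulted; if
+    that is 'auto', the first preferred client already installed on the
+    system is selected, falling back to the first preferred client.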
""" - if util.system_is_snappy(): - return False - if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])): - return True + # construct distro-specific ntp_client_config dict + distro_cfg = distro_ntp_client_configs(distro.name) + + # user specified client, return its config + if ntp_client and ntp_client != 'auto': + LOG.debug('Selected NTP client "%s" via user-data configuration', + ntp_client) + return distro_cfg.get(ntp_client, {}) + + # default to auto if unset in distro + distro_ntp_client = distro.get_option('ntp_client', 'auto') + + clientcfg = {} + if distro_ntp_client == "auto": + for client in distro.preferred_ntp_clients: + cfg = distro_cfg.get(client) + if util.which(cfg.get('check_exe')): + LOG.debug('Selected NTP client "%s", already installed', + client) + clientcfg = cfg + break + + if not clientcfg: + client = distro.preferred_ntp_clients[0] + LOG.debug( + 'Selected distro preferred NTP client "%s", not yet installed', + client) + clientcfg = distro_cfg.get(client) + else: + LOG.debug('Selected NTP client "%s" via distro system config', + distro_ntp_client) + clientcfg = distro_cfg.get(distro_ntp_client, {}) + + return clientcfg - return False +def install_ntp_client(install_func, packages=None, check_exe="ntpd"): + """Install ntp client package if not already installed. -def install_ntp(install_func, packages=None, check_exe="ntpd"): + @param install_func: function. This parameter is invoked with the contents + of the packages parameter. + @param packages: list. This parameter defaults to ['ntp']. + @param check_exe: string. The name of a binary that indicates the package + the specified package is already installed. + """ if util.which(check_exe): return if packages is None: @@ -164,15 +346,23 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): install_func(packages) -def rename_ntp_conf(config=None): - """Rename any existing ntp.conf file""" - if config is None: # For testing - config = NTP_CONF - if os.path.exists(config): - util.rename(config, config + ".dist") +def rename_ntp_conf(confpath=None): + """Rename any existing ntp client config file + + @param confpath: string. Specify a path to an existing ntp client + configuration file. + """ + if os.path.exists(confpath): + util.rename(confpath, confpath + ".dist") def generate_server_names(distro): + """Generate a list of server names to populate an ntp client configuration + file. + + @param distro: string. Specify the distro name + @returns: list: A list of strings representing ntp servers for this distro. + """ names = [] pool_distro = distro # For legal reasons x.pool.sles.ntp.org does not exist, @@ -185,34 +375,60 @@ def generate_server_names(distro): return names -def write_ntp_config_template(cfg, cloud, path, template=None): - servers = cfg.get('servers', []) - pools = cfg.get('pools', []) +def write_ntp_config_template(distro_name, servers=None, pools=None, + path=None, template_fn=None, template=None): + """Render a ntp client configuration for the specified client. + + @param distro_name: string. The distro class name. + @param servers: A list of strings specifying ntp servers. Defaults to empty + list. + @param pools: A list of strings specifying ntp pools. Defaults to empty + list. + @param path: A string to specify where to write the rendered template. + @param template_fn: A string to specify the template source file. + @param template: A string specifying the contents of the template. 
This + content will be written to a temporary file before being used to render + the configuration file. + + @raises: ValueError when path is None. + @raises: ValueError when template_fn is None and template is None. + """ + if not servers: + servers = [] + if not pools: + pools = [] if len(servers) == 0 and len(pools) == 0: - pools = generate_server_names(cloud.distro.name) + pools = generate_server_names(distro_name) LOG.debug( 'Adding distro default ntp pool servers: %s', ','.join(pools)) - params = { - 'servers': servers, - 'pools': pools, - } + if not path: + raise ValueError('Invalid value for path parameter') - if template is None: - template = 'ntp.conf.%s' % cloud.distro.name + if not template_fn and not template: + raise ValueError('Not template_fn or template provided') - template_fn = cloud.get_template_filename(template) - if not template_fn: - template_fn = cloud.get_template_filename('ntp.conf') - if not template_fn: - raise RuntimeError( - 'No template found, not rendering {path}'.format(path=path)) + params = {'servers': servers, 'pools': pools} + if template: + tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + template_fn = tfile[1] # filepath is second item in tuple + util.write_file(template_fn, content=template) templater.render_to_file(template_fn, path, params) + # clean up temporary template + if template: + util.del_file(template_fn) def reload_ntp(service, systemd=False): + """Restart or reload an ntp system service. + + @param service: A string specifying the name of the service to be affected. + @param systemd: A boolean indicating if the distro uses systemd, defaults + to False. + @returns: A tuple of stdout, stderr results from executing the action. + """ if systemd: cmd = ['systemctl', 'reload-or-restart', service] else: @@ -220,4 +436,117 @@ def reload_ntp(service, systemd=False): util.subp(cmd, capture=True) +def supplemental_schema_validation(ntp_config): + """Validate user-provided ntp:config option values. + + This function supplements flexible jsonschema validation with specific + value checks to aid in triage of invalid user-provided configuration. + + @param ntp_config: Dictionary of configuration value under 'ntp'. + + @raises: ValueError describing invalid values provided. + """ + errors = [] + missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys())) + if missing: + keys = ', '.join(sorted(missing)) + errors.append( + 'Missing required ntp:config keys: {keys}'.format(keys=keys)) + elif not any([ntp_config.get('template'), + ntp_config.get('template_name')]): + errors.append( + 'Either ntp:config:template or ntp:config:template_name values' + ' are required') + for key, value in sorted(ntp_config.items()): + keypath = 'ntp:config:' + key + if key == 'confpath': + if not all([value, isinstance(value, six.string_types)]): + errors.append( + 'Expected a config file path {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + elif key == 'packages': + if not isinstance(value, list): + errors.append( + 'Expected a list of required package names for {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + elif key in ('template', 'template_name'): + if value is None: # Either template or template_name can be none + continue + if not isinstance(value, six.string_types): + errors.append( + 'Expected a string type for {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + elif not isinstance(value, six.string_types): + errors.append( + 'Expected a string type for {keypath}.' 
+ ' Found ({value})'.format(keypath=keypath, value=value)) + + if errors: + raise ValueError(r'Invalid ntp configuration:\n{errors}'.format( + errors='\n'.join(errors))) + + +def handle(name, cfg, cloud, log, _args): + """Enable and configure ntp.""" + if 'ntp' not in cfg: + LOG.debug( + "Skipping module named %s, not present or disabled by cfg", name) + return + ntp_cfg = cfg['ntp'] + if ntp_cfg is None: + ntp_cfg = {} # Allow empty config which will install the package + + # TODO drop this when validate_cloudconfig_schema is strict=True + if not isinstance(ntp_cfg, (dict)): + raise RuntimeError( + "'ntp' key existed in config, but not a dictionary type," + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) + + validate_cloudconfig_schema(cfg, schema) + + # Allow users to explicitly enable/disable + enabled = ntp_cfg.get('enabled', True) + if util.is_false(enabled): + LOG.debug("Skipping module named %s, disabled by cfg", name) + return + + # Select which client is going to be used and get the configuration + ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), + cloud.distro) + + # Allow user ntp config to override distro configurations + ntp_client_config = util.mergemanydict( + [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) + + supplemental_schema_validation(ntp_client_config) + rename_ntp_conf(confpath=ntp_client_config.get('confpath')) + + template_fn = None + if not ntp_client_config.get('template'): + template_name = ( + ntp_client_config.get('template_name').replace('{distro}', + cloud.distro.name)) + template_fn = cloud.get_template_filename(template_name) + if not template_fn: + msg = ('No template found, not rendering %s' % + ntp_client_config.get('template_name')) + raise RuntimeError(msg) + + write_ntp_config_template(cloud.distro.name, + servers=ntp_cfg.get('servers', []), + pools=ntp_cfg.get('pools', []), + path=ntp_client_config.get('confpath'), + template_fn=template_fn, + template=ntp_client_config.get('template')) + + install_ntp_client(cloud.distro.install_packages, + packages=ntp_client_config['packages'], + check_exe=ntp_client_config['check_exe']) + try: + reload_ntp(ntp_client_config['service_name'], + systemd=cloud.distro.uses_systemd()) + except util.ProcessExecutionError as e: + LOG.exception("Failed to reload/start ntp service: %s", e) + raise + # vi: ts=4 expandtab diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 55260eae..6c22b07f 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -49,6 +49,9 @@ LOG = logging.getLogger(__name__) # It could break when Amazon adds new regions and new AZs. 
_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') +# Default NTP Client Configurations +PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate'] + @six.add_metaclass(abc.ABCMeta) class Distro(object): @@ -60,6 +63,7 @@ class Distro(object): tz_zone_dir = "/usr/share/zoneinfo" init_cmd = ['service'] # systemctl, service etc renderer_configs = {} + _preferred_ntp_clients = None def __init__(self, name, cfg, paths): self._paths = paths @@ -339,6 +343,14 @@ class Distro(object): contents.write("%s\n" % (eh)) util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644) + @property + def preferred_ntp_clients(self): + """Allow distro to determine the preferred ntp client list""" + if not self._preferred_ntp_clients: + self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS) + + return self._preferred_ntp_clients + def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] LOG.debug("Attempting to run bring up interface %s using command %s", diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 162dfa05..9f90e95e 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -208,4 +208,28 @@ class Distro(distros.Distro): nameservers, searchservers) return dev_names + @property + def preferred_ntp_clients(self): + """The preferred ntp client is dependent on the version.""" + + """Allow distro to determine the preferred ntp client list""" + if not self._preferred_ntp_clients: + distro_info = util.system_info()['dist'] + name = distro_info[0] + major_ver = int(distro_info[1].split('.')[0]) + + # This is horribly complicated because of a case of + # "we do not care if versions should be increasing syndrome" + if ( + (major_ver >= 15 and 'openSUSE' not in name) or + (major_ver >= 15 and 'openSUSE' in name and major_ver != 42) + ): + self._preferred_ntp_clients = ['chrony', + 'systemd-timesyncd', 'ntp'] + else: + self._preferred_ntp_clients = ['ntp', + 'systemd-timesyncd', 'chrony'] + + return self._preferred_ntp_clients + # vi: ts=4 expandtab diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index 82ca34f5..fdc1f622 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -10,12 +10,31 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.distros import debian +from cloudinit.distros import PREFERRED_NTP_CLIENTS from cloudinit import log as logging +from cloudinit import util + +import copy LOG = logging.getLogger(__name__) class Distro(debian.Distro): + + @property + def preferred_ntp_clients(self): + """The preferred ntp client is dependent on the version.""" + if not self._preferred_ntp_clients: + (name, version, codename) = util.system_info()['dist'] + # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd. 
+ if codename == "xenial" and not util.system_is_snappy(): + self._preferred_ntp_clients = ['ntp'] + else: + self._preferred_ntp_clients = ( + copy.deepcopy(PREFERRED_NTP_CLIENTS)) + return self._preferred_ntp_clients + pass + # vi: ts=4 expandtab diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 3129d4eb..5619de3e 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -151,6 +151,8 @@ system_info: groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video] sudo: ["ALL=(ALL) NOPASSWD:ALL"] shell: /bin/bash + # Automatically discover the best ntp_client + ntp_client: auto # Other config here will be given to the distro class and/or path classes paths: cloud_dir: /var/lib/cloud/ diff --git a/templates/chrony.conf.debian.tmpl b/templates/chrony.conf.debian.tmpl new file mode 100644 index 00000000..e72bfee1 --- /dev/null +++ b/templates/chrony.conf.debian.tmpl @@ -0,0 +1,39 @@ +## template:jinja +# Welcome to the chrony configuration file. See chrony.conf(5) for more +# information about usuable directives. +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# This directive specify the location of the file containing ID/key pairs for +# NTP authentication. +keyfile /etc/chrony/chrony.keys + +# This directive specify the file into which chronyd will store the rate +# information. +driftfile /var/lib/chrony/chrony.drift + +# Uncomment the following line to turn logging on. +#log tracking measurements statistics + +# Log files location. +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. +maxupdateskew 100.0 + +# This directive enables kernel synchronisation (every 11 minutes) of the +# real-time clock. Note that it can't be used along with the 'rtcfile' directive. +rtcsync + +# Step the system clock instead of slewing it if the adjustment is larger than +# one second, but only in the first three clock updates. +makestep 1 3 + diff --git a/templates/chrony.conf.fedora.tmpl b/templates/chrony.conf.fedora.tmpl new file mode 100644 index 00000000..8551f793 --- /dev/null +++ b/templates/chrony.conf.fedora.tmpl @@ -0,0 +1,48 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Enable hardware timestamping on all interfaces that support it. +#hwtimestamp * + +# Increase the minimum number of selectable sources required to adjust +# the system clock. +#minsources 2 + +# Allow NTP client access from local network. +#allow 192.168.0.0/16 + +# Serve time even if not synchronized to a time source. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Get TAI-UTC offset and leap seconds from the system tz database. +leapsectz right/UTC + +# Specify directory for log files. 
+logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/chrony.conf.opensuse.tmpl b/templates/chrony.conf.opensuse.tmpl new file mode 100644 index 00000000..a3d3e0ec --- /dev/null +++ b/templates/chrony.conf.opensuse.tmpl @@ -0,0 +1,38 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# In first three updates step the system clock instead of slew +# if the adjustment is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Allow NTP client access from local network. +#allow 192.168/16 + +# Serve time even if not synchronized to any NTP server. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Specify directory for log files. +logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/chrony.conf.rhel.tmpl b/templates/chrony.conf.rhel.tmpl new file mode 100644 index 00000000..5b3542ef --- /dev/null +++ b/templates/chrony.conf.rhel.tmpl @@ -0,0 +1,45 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Enable hardware timestamping on all interfaces that support it. +#hwtimestamp * + +# Increase the minimum number of selectable sources required to adjust +# the system clock. +#minsources 2 + +# Allow NTP client access from local network. +#allow 192.168.0.0/16 + +# Serve time even if not synchronized to a time source. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Specify directory for log files. +logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/chrony.conf.sles.tmpl b/templates/chrony.conf.sles.tmpl new file mode 100644 index 00000000..a3d3e0ec --- /dev/null +++ b/templates/chrony.conf.sles.tmpl @@ -0,0 +1,38 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# In first three updates step the system clock instead of slew +# if the adjustment is larger than 1 second. 
+makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Allow NTP client access from local network. +#allow 192.168/16 + +# Serve time even if not synchronized to any NTP server. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Specify directory for log files. +logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/chrony.conf.ubuntu.tmpl b/templates/chrony.conf.ubuntu.tmpl new file mode 100644 index 00000000..da7f16a4 --- /dev/null +++ b/templates/chrony.conf.ubuntu.tmpl @@ -0,0 +1,42 @@ +## template:jinja +# Welcome to the chrony configuration file. See chrony.conf(5) for more +# information about usuable directives. + +# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board +# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for +# more information. +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# This directive specify the location of the file containing ID/key pairs for +# NTP authentication. +keyfile /etc/chrony/chrony.keys + +# This directive specify the file into which chronyd will store the rate +# information. +driftfile /var/lib/chrony/chrony.drift + +# Uncomment the following line to turn logging on. +#log tracking measurements statistics + +# Log files location. +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. +maxupdateskew 100.0 + +# This directive enables kernel synchronisation (every 11 minutes) of the +# real-time clock. Note that it can't be used along with the 'rtcfile' directive. +rtcsync + +# Step the system clock instead of slewing it if the adjustment is larger than +# one second, but only in the first three clock updates. +makestep 1 3 diff --git a/tests/cloud_tests/testcases/modules/ntp.yaml b/tests/cloud_tests/testcases/modules/ntp.yaml index 2530d72e..7ea0707d 100644 --- a/tests/cloud_tests/testcases/modules/ntp.yaml +++ b/tests/cloud_tests/testcases/modules/ntp.yaml @@ -4,6 +4,7 @@ cloud_config: | #cloud-config ntp: + ntp_client: ntp pools: [] servers: [] collect_scripts: diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py new file mode 100644 index 00000000..461630a8 --- /dev/null +++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py @@ -0,0 +1,15 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+
+"""cloud-init Integration Test Verify Script."""
+from tests.cloud_tests.testcases import base
+
+
+class TestNtpChrony(base.CloudTestCase):
+    """Test ntp module with chrony client"""
+
+    def test_chrony_entries(self):
+        """Test chrony config entries"""
+        out = self.get_data_file('chrony_conf')
+        self.assertIn('.pool.ntp.org', out)
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.yaml b/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
new file mode 100644
index 00000000..120735e2
--- /dev/null
+++ b/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
@@ -0,0 +1,17 @@
+#
+# ntp enabled, chrony selected, check conf file
+# as chrony won't start in a container
+#
+cloud_config: |
+  #cloud-config
+  ntp:
+    enabled: true
+    ntp_client: chrony
+collect_scripts:
+  chrony_conf: |
+    #!/bin/sh
+    set -- /etc/chrony.conf /etc/chrony/chrony.conf
+    for p in "$@"; do
+        [ -e "$p" ] && { cat "$p"; exit; }
+    done
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.yaml b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
index d490b228..60fa0fd1 100644
--- a/tests/cloud_tests/testcases/modules/ntp_pools.yaml
+++ b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
@@ -9,6 +9,7 @@ required_features:
 cloud_config: |
   #cloud-config
   ntp:
+    ntp_client: ntp
     pools:
       - 0.cloud-init.mypool
       - 1.cloud-init.mypool
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.yaml b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
index 6b13b70e..ee636679 100644
--- a/tests/cloud_tests/testcases/modules/ntp_servers.yaml
+++ b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
@@ -6,6 +6,7 @@ required_features:
 cloud_config: |
   #cloud-config
   ntp:
+    ntp_client: ntp
     servers:
       - 172.16.15.14
       - 172.16.17.18
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.py b/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
new file mode 100644
index 00000000..eca750bc
--- /dev/null
+++ b/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
@@ -0,0 +1,15 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+ +"""cloud-init Integration Test Verify Script.""" +from tests.cloud_tests.testcases import base + + +class TestNtpTimesyncd(base.CloudTestCase): + """Test ntp module with systemd-timesyncd client""" + + def test_timesyncd_entries(self): + """Test timesyncd config entries""" + out = self.get_data_file('timesyncd_conf') + self.assertIn('.pool.ntp.org', out) + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml b/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml new file mode 100644 index 00000000..ee47a741 --- /dev/null +++ b/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml @@ -0,0 +1,15 @@ +# +# ntp enabled, systemd-timesyncd selected, check conf file +# as systemd-timesyncd won't start in a container +# +cloud_config: | + #cloud-config + ntp: + enabled: true + ntp_client: systemd-timesyncd +collect_scripts: + timesyncd_conf: | + #!/bin/sh + cat /etc/systemd/timesyncd.conf.d/cloud-init.conf + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 1c2e45fe..7765e408 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -189,6 +189,12 @@ hn0: flags=8843 metric 0 mtu 1500 status: active """ + def setUp(self): + super(TestNetCfgDistro, self).setUp() + self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.add_patch('cloudinit.util.system_info', 'm_sysinfo') + self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} + def _get_distro(self, dname, renderers=None): cls = distros.fetch(dname) cfg = settings.CFG_BUILTIN diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index 0fa9cdb5..fa4b6cfe 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -22,6 +22,12 @@ bcfg = { class TestUGNormalize(TestCase): + def setUp(self): + super(TestUGNormalize, self).setUp() + self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.add_patch('cloudinit.util.system_info', 'm_sysinfo') + self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} + def _make_distro(self, dtype, def_user=None): cfg = dict(settings.CFG_BUILTIN) cfg['system_info']['distro'] = dtype diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 695897c0..1b3ca570 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -4,20 +4,21 @@ from cloudinit.config import cc_ntp from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) from cloudinit.tests.helpers import ( - FilesystemMockingTestCase, mock, skipUnlessJsonSchema) + CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) +import copy import os from os.path import dirname import shutil -NTP_TEMPLATE = b"""\ +NTP_TEMPLATE = """\ ## template: jinja servers {{servers}} pools {{pools}} """ -TIMESYNCD_TEMPLATE = b"""\ +TIMESYNCD_TEMPLATE = """\ ## template:jinja [Time] {% if servers or pools -%} @@ -32,56 +33,88 @@ class TestNtp(FilesystemMockingTestCase): def setUp(self): super(TestNtp, self).setUp() - self.subp = util.subp self.new_root = self.tmp_dir() + self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.m_snappy.return_value = False + self.add_patch('cloudinit.util.system_info', 'm_sysinfo') + 
self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} - def _get_cloud(self, distro): - self.patchUtils(self.new_root) + def _get_cloud(self, distro, sys_cfg=None): + self.new_root = self.reRoot(root=self.new_root) paths = helpers.Paths({'templates_dir': self.new_root}) cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - return cloud.Cloud(myds, paths, {}, mydist, None) + if not sys_cfg: + sys_cfg = {} + mydist = cls(distro, sys_cfg, paths) + myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, paths) + return cloud.Cloud(myds, paths, sys_cfg, mydist, None) + + def _get_template_path(self, template_name, distro, basepath=None): + # ntp.conf.{distro} -> ntp.conf.debian.tmpl + template_fn = '{0}.tmpl'.format( + template_name.replace('{distro}', distro)) + if not basepath: + basepath = self.new_root + path = os.path.join(basepath, template_fn) + return path + + def _generate_template(self, template=None): + if not template: + template = NTP_TEMPLATE + confpath = os.path.join(self.new_root, 'client.conf') + template_fn = os.path.join(self.new_root, 'client.conf.tmpl') + util.write_file(template_fn, content=template) + return (confpath, template_fn) + + def _mock_ntp_client_config(self, client=None, distro=None): + if not client: + client = 'ntp' + if not distro: + distro = 'ubuntu' + dcfg = cc_ntp.distro_ntp_client_configs(distro) + if client == 'systemd-timesyncd': + template = TIMESYNCD_TEMPLATE + else: + template = NTP_TEMPLATE + (confpath, template_fn) = self._generate_template(template=template) + ntpconfig = copy.deepcopy(dcfg[client]) + ntpconfig['confpath'] = confpath + ntpconfig['template_name'] = os.path.basename(confpath) + return ntpconfig @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install(self, mock_util): - """ntp_install installs via install_func when check_exe is absent.""" + """ntp_install_client runs install_func when check_exe is absent.""" mock_util.which.return_value = None # check_exe not found. install_func = mock.MagicMock() - cc_ntp.install_ntp(install_func, packages=['ntpx'], check_exe='ntpdx') - + cc_ntp.install_ntp_client(install_func, + packages=['ntpx'], check_exe='ntpdx') mock_util.which.assert_called_with('ntpdx') install_func.assert_called_once_with(['ntpx']) @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install_not_needed(self, mock_util): - """ntp_install doesn't attempt install when check_exe is found.""" - mock_util.which.return_value = ["/usr/sbin/ntpd"] # check_exe found. + """ntp_install_client doesn't install when check_exe is found.""" + client = 'chrony' + mock_util.which.return_value = [client] # check_exe found. 
install_func = mock.MagicMock() - cc_ntp.install_ntp(install_func, packages=['ntp'], check_exe='ntpd') + cc_ntp.install_ntp_client(install_func, packages=[client], + check_exe=client) install_func.assert_not_called() @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install_no_op_with_empty_pkg_list(self, mock_util): - """ntp_install calls install_func with empty list""" + """ntp_install_client runs install_func with empty list""" mock_util.which.return_value = None # check_exe not found install_func = mock.MagicMock() - cc_ntp.install_ntp(install_func, packages=[], check_exe='timesyncd') + cc_ntp.install_ntp_client(install_func, packages=[], + check_exe='timesyncd') install_func.assert_called_once_with([]) - def test_ntp_rename_ntp_conf(self): - """When NTP_CONF exists, rename_ntp moves it.""" - ntpconf = self.tmp_path("ntp.conf", self.new_root) - util.write_file(ntpconf, "") - with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): - cc_ntp.rename_ntp_conf() - self.assertFalse(os.path.exists(ntpconf)) - self.assertTrue(os.path.exists("{0}.dist".format(ntpconf))) - @mock.patch("cloudinit.config.cc_ntp.util") def test_reload_ntp_defaults(self, mock_util): """Test service is restarted/reloaded (defaults)""" - service = 'ntp' + service = 'ntp_service_name' cmd = ['service', service, 'restart'] cc_ntp.reload_ntp(service) mock_util.subp.assert_called_with(cmd, capture=True) @@ -89,193 +122,171 @@ class TestNtp(FilesystemMockingTestCase): @mock.patch("cloudinit.config.cc_ntp.util") def test_reload_ntp_systemd(self, mock_util): """Test service is restarted/reloaded (systemd)""" - service = 'ntp' - cmd = ['systemctl', 'reload-or-restart', service] + service = 'ntp_service_name' cc_ntp.reload_ntp(service, systemd=True) - mock_util.subp.assert_called_with(cmd, capture=True) - - @mock.patch("cloudinit.config.cc_ntp.util") - def test_reload_ntp_systemd_timesycnd(self, mock_util): - """Test service is restarted/reloaded (systemd/timesyncd)""" - service = 'systemd-timesycnd' cmd = ['systemctl', 'reload-or-restart', service] - cc_ntp.reload_ntp(service, systemd=True) mock_util.subp.assert_called_with(cmd, capture=True) + def test_ntp_rename_ntp_conf(self): + """When NTP_CONF exists, rename_ntp moves it.""" + ntpconf = self.tmp_path("ntp.conf", self.new_root) + util.write_file(ntpconf, "") + cc_ntp.rename_ntp_conf(confpath=ntpconf) + self.assertFalse(os.path.exists(ntpconf)) + self.assertTrue(os.path.exists("{0}.dist".format(ntpconf))) + def test_ntp_rename_ntp_conf_skip_missing(self): """When NTP_CONF doesn't exist rename_ntp doesn't create a file.""" ntpconf = self.tmp_path("ntp.conf", self.new_root) self.assertFalse(os.path.exists(ntpconf)) - with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): - cc_ntp.rename_ntp_conf() + cc_ntp.rename_ntp_conf(confpath=ntpconf) self.assertFalse(os.path.exists("{0}.dist".format(ntpconf))) self.assertFalse(os.path.exists(ntpconf)) - def test_write_ntp_config_template_from_ntp_conf_tmpl_with_servers(self): - """write_ntp_config_template reads content from ntp.conf.tmpl. - - It reads ntp.conf.tmpl if present and renders the value from servers - key. When no pools key is defined, template is rendered using an empty - list for pools. 
- """ - distro = 'ubuntu' - cfg = { - 'servers': ['192.168.2.1', '192.168.2.2'] - } - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path("ntp.conf", self.new_root) # Doesn't exist - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents + def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): + """write_ntp_config_template reads from $client.conf.distro.tmpl""" + servers = [] + pools = ['10.0.0.1', '10.0.0.2'] + (confpath, template_fn) = self._generate_template() + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.write_ntp_config_template('ubuntu', + servers=servers, pools=pools, + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( - "servers ['192.168.2.1', '192.168.2.2']\npools []\n", - content.decode()) + "servers []\npools ['10.0.0.1', '10.0.0.2']\n", content.decode()) - def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): - """write_ntp_config_template reads content from ntp.conf.distro.tmpl. + def test_write_ntp_config_template_defaults_pools_w_empty_lists(self): + """write_ntp_config_template defaults pools servers upon empty config. - It reads ntp.conf..tmpl before attempting ntp.conf.tmpl. It - renders the value from the keys servers and pools. When no - servers value is present, template is rendered using an empty list. + When both pools and servers are empty, default NR_POOL_SERVERS get + configured. """ distro = 'ubuntu' - cfg = { - 'pools': ['10.0.0.1', '10.0.0.2'] - } - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - # Create ntp.conf.tmpl which isn't read - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(b'NOT READ: ntp.conf..tmpl is primary') - # Create ntp.conf.tmpl. - with open('{0}.{1}.tmpl'.format(ntp_conf, distro), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents + pools = cc_ntp.generate_server_names(distro) + servers = [] + (confpath, template_fn) = self._generate_template() + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.write_ntp_config_template(distro, + servers=servers, pools=pools, + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( - "servers []\npools ['10.0.0.1', '10.0.0.2']\n", + "servers []\npools {0}\n".format(pools), content.decode()) - def test_write_ntp_config_template_defaults_pools_when_empty_lists(self): - """write_ntp_config_template defaults pools servers upon empty config. + def test_defaults_pools_empty_lists_sles(self): + """write_ntp_config_template defaults opensuse pools upon empty config. When both pools and servers are empty, default NR_POOL_SERVERS get configured. 
""" - distro = 'ubuntu' - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents - default_pools = [ - "{0}.{1}.pool.ntp.org".format(x, distro) - for x in range(0, cc_ntp.NR_POOL_SERVERS)] + distro = 'sles' + default_pools = cc_ntp.generate_server_names(distro) + (confpath, template_fn) = self._generate_template() + + cc_ntp.write_ntp_config_template(distro, + servers=[], pools=[], + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents + for pool in default_pools: + self.assertIn('opensuse', pool) self.assertEqual( - "servers []\npools {0}\n".format(default_pools), - content.decode()) + "servers []\npools {0}\n".format(default_pools), content.decode()) self.assertIn( "Adding distro default ntp pool servers: {0}".format( ",".join(default_pools)), self.logs.getvalue()) - @mock.patch("cloudinit.config.cc_ntp.ntp_installable") - def test_ntp_handler_mocked_template(self, m_ntp_install): - """Test ntp handler renders ubuntu ntp.conf template.""" - pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] - servers = ['192.168.23.3', '192.168.23.4'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers - } - } - mycloud = self._get_cloud('ubuntu') - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - m_ntp_install.return_value = True - - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - with mock.patch.object(util, 'which', return_value=None): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - - content = util.read_file_or_url('file://' + ntp_conf).contents - self.assertEqual( - 'servers {0}\npools {1}\n'.format(servers, pools), - content.decode()) - - @mock.patch("cloudinit.config.cc_ntp.util") - def test_ntp_handler_mocked_template_snappy(self, m_util): - """Test ntp handler renders timesycnd.conf template on snappy.""" + def test_timesyncd_template(self): + """Test timesycnd template is correct""" pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] servers = ['192.168.23.3', '192.168.23.4'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers - } - } - mycloud = self._get_cloud('ubuntu') - m_util.system_is_snappy.return_value = True - - # Create timesyncd.conf.tmpl - tsyncd_conf = self.tmp_path("timesyncd.conf", self.new_root) - template = '{0}.tmpl'.format(tsyncd_conf) - with open(template, 'wb') as stream: - stream.write(TIMESYNCD_TEMPLATE) - - with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - - content = util.read_file_or_url('file://' + tsyncd_conf).contents + (confpath, template_fn) = self._generate_template( + template=TIMESYNCD_TEMPLATE) + cc_ntp.write_ntp_config_template('ubuntu', + servers=servers, pools=pools, + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)), content.decode()) - def test_ntp_handler_real_distro_templates(self): - """Test ntp handler 
renders the shipped distro ntp.conf templates.""" + def test_distro_ntp_client_configs(self): + """Test we have updated ntp client configs on different distros""" + delta = copy.deepcopy(cc_ntp.DISTRO_CLIENT_CONFIG) + base = copy.deepcopy(cc_ntp.NTP_CLIENT_CONFIG) + # confirm no-delta distros match the base config + for distro in cc_ntp.distros: + if distro not in delta: + result = cc_ntp.distro_ntp_client_configs(distro) + self.assertEqual(base, result) + # for distros with delta, ensure the merged config values match + # what is set in the delta + for distro in delta.keys(): + result = cc_ntp.distro_ntp_client_configs(distro) + for client in delta[distro].keys(): + for key in delta[distro][client].keys(): + self.assertEqual(delta[distro][client][key], + result[client][key]) + + def test_ntp_handler_real_distro_ntp_templates(self): + """Test ntp handler renders the shipped distro ntp client templates.""" pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] servers = ['192.168.23.3', '192.168.23.4'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers - } - } - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - for distro in ('debian', 'ubuntu', 'fedora', 'rhel', 'sles'): - mycloud = self._get_cloud(distro) - root_dir = dirname(dirname(os.path.realpath(util.__file__))) - tmpl_file = os.path.join( - '{0}/templates/ntp.conf.{1}.tmpl'.format(root_dir, distro)) - # Create a copy in our tmp_dir - shutil.copy( - tmpl_file, - os.path.join(self.new_root, 'ntp.conf.%s.tmpl' % distro)) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - with mock.patch.object(util, 'which', return_value=[True]): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - - content = util.read_file_or_url('file://' + ntp_conf).contents - expected_servers = '\n'.join([ - 'server {0} iburst'.format(server) for server in servers]) - self.assertIn( - expected_servers, content.decode(), - 'failed to render ntp.conf for distro:{0}'.format(distro)) - expected_pools = '\n'.join([ - 'pool {0} iburst'.format(pool) for pool in pools]) - self.assertIn( - expected_pools, content.decode(), - 'failed to render ntp.conf for distro:{0}'.format(distro)) + for client in ['ntp', 'systemd-timesyncd', 'chrony']: + for distro in cc_ntp.distros: + distro_cfg = cc_ntp.distro_ntp_client_configs(distro) + ntpclient = distro_cfg[client] + confpath = ( + os.path.join(self.new_root, ntpclient.get('confpath')[1:])) + template = ntpclient.get('template_name') + # find sourcetree template file + root_dir = ( + dirname(dirname(os.path.realpath(util.__file__))) + + '/templates') + source_fn = self._get_template_path(template, distro, + basepath=root_dir) + template_fn = self._get_template_path(template, distro) + # don't fail if cloud-init doesn't have a template for + # a distro,client pair + if not os.path.exists(source_fn): + continue + # Create a copy in our tmp_dir + shutil.copy(source_fn, template_fn) + cc_ntp.write_ntp_config_template(distro, servers=servers, + pools=pools, path=confpath, + template_fn=template_fn) + content = util.read_file_or_url('file://' + confpath).contents + if client in ['ntp', 'chrony']: + expected_servers = '\n'.join([ + 'server {0} iburst'.format(srv) for srv in servers]) + print('distro=%s client=%s' % (distro, client)) + self.assertIn(expected_servers, content.decode(), + ('failed to render {0} conf' + ' for distro:{1}'.format(client, distro))) + expected_pools = '\n'.join([ + 'pool {0} iburst'.format(pool) for pool in pools]) + self.assertIn(expected_pools, 
content.decode(), + ('failed to render {0} conf' + ' for distro:{1}'.format(client, distro))) + elif client == 'systemd-timesyncd': + expected_content = ( + "# cloud-init generated file\n" + + "# See timesyncd.conf(5) for details.\n\n" + + "[Time]\nNTP=%s %s \n" % (" ".join(servers), + " ".join(pools))) + self.assertEqual(expected_content, content.decode()) def test_no_ntpcfg_does_nothing(self): """When no ntp section is defined handler logs a warning and noops.""" @@ -285,95 +296,99 @@ class TestNtp(FilesystemMockingTestCase): 'not present or disabled by cfg\n', self.logs.getvalue()) - def test_ntp_handler_schema_validation_allows_empty_ntp_config(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_allows_empty_ntp_config(self, + m_select): """Ntp schema validation allows for an empty ntp: configuration.""" valid_empty_configs = [{'ntp': {}}, {'ntp': None}] - distro = 'ubuntu' - cc = self._get_cloud(distro) - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) for valid_empty_config in valid_empty_configs: - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', valid_empty_config, cc, None, []) - with open(ntp_conf) as stream: - content = stream.read() - default_pools = [ - "{0}.{1}.pool.ntp.org".format(x, distro) - for x in range(0, cc_ntp.NR_POOL_SERVERS)] - self.assertEqual( - "servers []\npools {0}\n".format(default_pools), - content) - self.assertNotIn('Invalid config:', self.logs.getvalue()) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, []) + content = util.read_file_or_url('file://' + confpath).contents + pools = cc_ntp.generate_server_names(mycloud.distro.name) + self.assertEqual( + "servers []\npools {0}\n".format(pools), content.decode()) + self.assertNotIn('Invalid config:', self.logs.getvalue()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_non_string_item_type(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_non_string_item_type(self, + m_sel): """Ntp schema validation warns of non-strings in pools or servers. Schema validation is not strict, so ntp config is still rendered. 
""" invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" - "ntp.servers.1: None is not of type 'string'", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual("servers ['valid', None]\npools [123]\n", content) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_sel.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" + "ntp.servers.1: None is not of type 'string'", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual("servers ['valid', None]\npools [123]\n", + content.decode()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_of_non_array_type(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_of_non_array_type(self, + m_select): """Ntp schema validation warns of non-array pools or servers types. Schema validation is not strict, so ntp config is still rendered. """ invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp.pools: 123 is not of type 'array'\n" - "ntp.servers: 'non-array' is not of type 'array'", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual("servers non-array\npools 123\n", content) + + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools: 123 is not of type 'array'\n" + "ntp.servers: 'non-array' is not of type 'array'", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual("servers non-array\npools 123\n", + content.decode()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_invalid_key_present(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_invalid_key_present(self, + m_select): """Ntp schema validation warns of invalid keys present in ntp config. Schema validation is not strict, so ntp config is still rendered. 
""" invalid_config = { 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp: Additional properties are not allowed " - "('invalidkey' was unexpected)", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual( - "servers []\npools ['0.mycompany.pool.ntp.org']\n", - content) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp: Additional properties are not allowed " + "('invalidkey' was unexpected)", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools ['0.mycompany.pool.ntp.org']\n", + content.decode()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_of_duplicates(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select): """Ntp schema validation warns of duplicates in servers or pools. Schema validation is not strict, so ntp config is still rendered. @@ -381,74 +396,334 @@ class TestNtp(FilesystemMockingTestCase): invalid_config = { 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'], 'servers': ['10.0.0.1', '10.0.0.1']}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org'] has " - "non-unique elements\nntp.servers: ['10.0.0.1', '10.0.0.1'] has " - "non-unique elements", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual( - "servers ['10.0.0.1', '10.0.0.1']\n" - "pools ['0.mypool.org', '0.mypool.org']\n", - content) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']" + " has non-unique elements\nntp.servers: " + "['10.0.0.1', '10.0.0.1'] has non-unique elements", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers ['10.0.0.1', '10.0.0.1']\n" + "pools ['0.mypool.org', '0.mypool.org']\n", content.decode()) - @mock.patch("cloudinit.config.cc_ntp.ntp_installable") - def test_ntp_handler_timesyncd(self, m_ntp_install): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_timesyncd(self, m_select): """Test ntp handler configures timesyncd""" - m_ntp_install.return_value = False - distro = 'ubuntu' - cfg = { - 'servers': ['192.168.2.1', '192.168.2.2'], - 'pools': ['0.mypool.org'], - } - mycloud = self._get_cloud(distro) - tsyncd_conf = 
self.tmp_path("timesyncd.conf", self.new_root) - # Create timesyncd.conf.tmpl - template = '{0}.tmpl'.format(tsyncd_conf) - print(template) - with open(template, 'wb') as stream: - stream.write(TIMESYNCD_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf): - cc_ntp.write_ntp_config_template(cfg, mycloud, tsyncd_conf, - template='timesyncd.conf') - - content = util.read_file_or_url('file://' + tsyncd_conf).contents - self.assertEqual( - "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", - content.decode()) + servers = ['192.168.2.1', '192.168.2.2'] + pools = ['0.mypool.org'] + cfg = {'ntp': {'servers': servers, 'pools': pools}} + client = 'systemd-timesyncd' + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro, + client=client) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', cfg, mycloud, None, []) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", + content.decode()) + + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_enabled_false(self, m_select): + """Test ntp handler does not run if enabled: false """ + cfg = {'ntp': {'enabled': False}} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + self.assertEqual(0, m_select.call_count) + + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + @mock.patch("cloudinit.distros.Distro.uses_systemd") + def test_ntp_the_whole_package(self, m_sysd, m_select): + """Test enabled config renders template, and restarts service """ + cfg = {'ntp': {'enabled': True}} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + service_name = ntpconfig['service_name'] + m_select.return_value = ntpconfig + pools = cc_ntp.generate_server_names(mycloud.distro.name) + # force uses systemd path + m_sysd.return_value = True + with mock.patch('cloudinit.config.cc_ntp.util') as m_util: + # allow use of util.mergemanydict + m_util.mergemanydict.side_effect = util.mergemanydict + # default client is present + m_util.which.return_value = True + # use the config 'enabled' value + m_util.is_false.return_value = util.is_false( + cfg['ntp']['enabled']) + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + m_util.subp.assert_called_with( + ['systemctl', 'reload-or-restart', + service_name], capture=True) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools {0}\n".format(pools), + content.decode()) + + def test_opensuse_picks_chrony(self): + """Test opensuse picks chrony or ntp on certain distro versions""" + # < 15.0 => ntp + self.m_sysinfo.return_value = {'dist': + ('openSUSE', '13.2', 'Harlequin')} + mycloud = self._get_cloud('opensuse') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('ntp', expected_client) + + # >= 15.0 and not openSUSE => chrony + self.m_sysinfo.return_value = {'dist': + ('SLES', '15.0', + 'SUSE Linux Enterprise Server 15')} + mycloud = self._get_cloud('sles') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('chrony', expected_client) + + # >= 15.0 and openSUSE and ver != 42 => chrony + self.m_sysinfo.return_value = {'dist': ('openSUSE Tumbleweed', + '20180326', + 'timbleweed')} + 
mycloud = self._get_cloud('opensuse') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('chrony', expected_client) + + def test_ubuntu_xenial_picks_ntp(self): + """Test Ubuntu picks ntp on xenial release""" + + self.m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')} + mycloud = self._get_cloud('ubuntu') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('ntp', expected_client) - def test_write_ntp_config_template_defaults_pools_empty_lists_sles(self): - """write_ntp_config_template defaults pools servers upon empty config. + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_snappy_system_picks_timesyncd(self, m_which): + """Test snappy systems prefer installed clients""" - When both pools and servers are empty, default NR_POOL_SERVERS get - configured. - """ - distro = 'sles' - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents - default_pools = [ - "{0}.opensuse.pool.ntp.org".format(x) - for x in range(0, cc_ntp.NR_POOL_SERVERS)] - self.assertEqual( - "servers []\npools {0}\n".format(default_pools), - content.decode()) - self.assertIn( - "Adding distro default ntp pool servers: {0}".format( - ",".join(default_pools)), - self.logs.getvalue()) + # we are on ubuntu-core here + self.m_snappy.return_value = True + # ubuntu core systems will have timesyncd installed + m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd', + None, None, None]) + distro = 'ubuntu' + mycloud = self._get_cloud(distro) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_client = 'systemd-timesyncd' + expected_cfg = distro_configs[expected_client] + expected_calls = [] + # we only get to timesyncd + for client in mycloud.distro.preferred_ntp_clients[0:2]: + cfg = distro_configs[client] + expected_calls.append(mock.call(cfg['check_exe'])) + result = cc_ntp.select_ntp_client(None, mycloud.distro) + m_which.assert_has_calls(expected_calls) + self.assertEqual(sorted(expected_cfg), sorted(cfg)) + self.assertEqual(sorted(expected_cfg), sorted(result)) + + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_ntp_distro_searches_all_preferred_clients(self, m_which): + """Test select_ntp_client searches all distro preferred clients.""" + # nothing is installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_client = mycloud.distro.preferred_ntp_clients[0] + expected_cfg = distro_configs[expected_client] + expected_calls = [] + for client in mycloud.distro.preferred_ntp_clients: + cfg = distro_configs[client] + expected_calls.append(mock.call(cfg['check_exe'])) + cc_ntp.select_ntp_client({}, mycloud.distro) + m_which.assert_has_calls(expected_calls) + self.assertEqual(sorted(expected_cfg), sorted(cfg)) + + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which): + """Test user_cfg.ntp_client='auto' defaults to distro search""" + # nothing is installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + distro_configs = 
cc_ntp.distro_ntp_client_configs(distro) + expected_client = mycloud.distro.preferred_ntp_clients[0] + expected_cfg = distro_configs[expected_client] + expected_calls = [] + for client in mycloud.distro.preferred_ntp_clients: + cfg = distro_configs[client] + expected_calls.append(mock.call(cfg['check_exe'])) + cc_ntp.select_ntp_client('auto', mycloud.distro) + m_which.assert_has_calls(expected_calls) + self.assertEqual(sorted(expected_cfg), sorted(cfg)) + + @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template') + @mock.patch('cloudinit.cloud.Cloud.get_template_filename') + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_ntp_custom_client_overrides_installed_clients(self, m_which, + m_tmpfn, m_write): + """Test user client is installed despite other clients present """ + client = 'ntpdate' + cfg = {'ntp': {'ntp_client': client}} + for distro in cc_ntp.distros: + # client is not installed + m_which.side_effect = iter([None]) + mycloud = self._get_cloud(distro) + with mock.patch.object(mycloud.distro, + 'install_packages') as m_install: + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + m_install.assert_called_with([client]) + m_which.assert_called_with(client) + + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which): + """Test distro system_config overrides builtin preferred ntp clients""" + system_client = 'chrony' + sys_cfg = {'ntp_client': system_client} + # no clients installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro, sys_cfg=sys_cfg) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_cfg = distro_configs[system_client] + result = cc_ntp.select_ntp_client(None, mycloud.distro) + self.assertEqual(sorted(expected_cfg), sorted(result)) + m_which.assert_has_calls([]) + + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_ntp_user_config_overrides_system_cfg(self, m_which): + """Test user-data overrides system_config ntp_client""" + system_client = 'chrony' + sys_cfg = {'ntp_client': system_client} + user_client = 'systemd-timesyncd' + # no clients installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro, sys_cfg=sys_cfg) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_cfg = distro_configs[user_client] + result = cc_ntp.select_ntp_client(user_client, mycloud.distro) + self.assertEqual(sorted(expected_cfg), sorted(result)) + m_which.assert_has_calls([]) + + @mock.patch('cloudinit.config.cc_ntp.reload_ntp') + @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') + def test_ntp_user_provided_config_with_template(self, m_install, m_reload): + custom = r'\n#MyCustomTemplate' + user_template = NTP_TEMPLATE + custom + confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf') + cfg = { + 'ntp': { + 'pools': ['mypool.org'], + 'ntp_client': 'myntpd', + 'config': { + 'check_exe': 'myntpd', + 'confpath': confpath, + 'packages': ['myntp'], + 'service_name': 'myntp', + 'template': user_template, + } + } + } + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools ['mypool.org']\n%s" % custom, + content.decode()) + + 
@mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation') + @mock.patch('cloudinit.config.cc_ntp.reload_ntp') + @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_user_provided_config_template_only(self, m_select, m_install, + m_reload, m_schema): + """Test custom template for default client""" + custom = r'\n#MyCustomTemplate' + user_template = NTP_TEMPLATE + custom + client = 'chrony' + cfg = { + 'pools': ['mypool.org'], + 'ntp_client': client, + 'config': { + 'template': user_template, + } + } + expected_merged_cfg = { + 'check_exe': 'chronyd', + 'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root), + 'template_name': 'client.conf', 'template': user_template, + 'service_name': 'chrony', 'packages': ['chrony']} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(client=client, + distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.handle('notimportant', + {'ntp': cfg}, mycloud, None, None) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools ['mypool.org']\n%s" % custom, + content.decode()) + m_schema.assert_called_with(expected_merged_cfg) + + +class TestSupplementalSchemaValidation(CiTestCase): + + def test_error_on_missing_keys(self): + """ValueError raised reporting any missing required ntp:config keys""" + cfg = {} + match = (r'Invalid ntp configuration:\\nMissing required ntp:config' + ' keys: check_exe, confpath, packages, service_name') + with self.assertRaisesRegex(ValueError, match): + cc_ntp.supplemental_schema_validation(cfg) + + def test_error_requiring_either_template_or_template_name(self): + """ValueError raised if both template and template_name are None.""" + cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '', + 'template': None, 'template_name': None, 'packages': []} + match = (r'Invalid ntp configuration:\\nEither ntp:config:template' + ' or ntp:config:template_name values are required') + with self.assertRaisesRegex(ValueError, match): + cc_ntp.supplemental_schema_validation(cfg) + + def test_error_on_non_list_values(self): + """ValueError raised when packages is not of type list.""" + cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '', + 'template': 'asdf', 'template_name': None, 'packages': 'NOPE'} + match = (r'Invalid ntp configuration:\\nExpected a list of required' + ' package names for ntp:config:packages. Found \(NOPE\)') + with self.assertRaisesRegex(ValueError, match): + cc_ntp.supplemental_schema_validation(cfg) + + def test_error_on_non_string_values(self): + """ValueError raised for any values expected as string type.""" + cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3, + 'template': 4, 'template_name': 5, 'packages': []} + errors = [ + 'Expected a config file path ntp:config:confpath. Found (1)', + 'Expected a string type for ntp:config:check_exe. Found (2)', + 'Expected a string type for ntp:config:service_name. Found (3)', + 'Expected a string type for ntp:config:template. Found (4)', + 'Expected a string type for ntp:config:template_name. 
Found (5)'] + with self.assertRaises(ValueError) as context_mgr: + cc_ntp.supplemental_schema_validation(cfg) + error_msg = str(context_mgr.exception) + for error in errors: + self.assertIn(error, error_msg) # vi: ts=4 expandtab -- cgit v1.2.3 From 4b86ab9a25b512420ecfe98953a3f3a6e4b4bba1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 12 Apr 2018 15:51:07 -0600 Subject: renderer: support unicode in render_from_file. If a file passed to render_from_file had non-ascii text then jinja in python2 would decode as ascii, which would cause UnicodeDecodeError. This issue can be re-created in python2 with just: 'can\xe2\x80\x99t'.decode() The solution here is to explicitly pass in unicode supporting type (py3 str, py2 unicode). Those are six.text_type. Then jinja does not try to decode. The reason we hit this is that load_file calls decode_binary. decode_binary believes it has no work to do if it got a six.string_types. isinstance('can\xe2\x80\x99t', six.string_types) == True So it returns the original string which will blow up for jinja. Our fix here then is to load the file in binary mode and explicitly decode it to utf-8. Then in python2 we'll have a unicode type and in python3 we'll have a string type. --- cloudinit/templater.py | 10 +++++- templates/chrony.conf.debian.tmpl | 2 +- templates/chrony.conf.ubuntu.tmpl | 2 +- tests/unittests/test_handler/test_handler_ntp.py | 4 +-- tests/unittests/test_templating.py | 41 +++++++++++++++++++++++- 5 files changed, 53 insertions(+), 6 deletions(-) (limited to 'tests') diff --git a/cloudinit/templater.py b/cloudinit/templater.py index b3ea64e4..9a087e1c 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -121,7 +121,11 @@ def detect_template(text): def render_from_file(fn, params): if not params: params = {} - template_type, renderer, content = detect_template(util.load_file(fn)) + # jinja in python2 uses unicode internally. All py2 str will be decoded. + # If it is given a str that has non-ascii then it will raise a + # UnicodeDecodeError. So we explicitly convert to unicode type here. + template_type, renderer, content = detect_template( + util.load_file(fn, decode=False).decode('utf-8')) LOG.debug("Rendering content of '%s' using renderer %s", fn, template_type) return renderer(content, params) @@ -132,11 +136,15 @@ def render_to_file(fn, outfn, params, mode=0o644): def render_string_to_file(content, outfn, params, mode=0o644): + """Render string (or py2 unicode) to file. + Warning: py2 str with non-ascii chars will cause UnicodeDecodeError.""" contents = render_string(content, params) util.write_file(outfn, contents, mode=mode) def render_string(content, params): + """Render string (or py2 unicode). + Warning: py2 str with non-ascii chars will cause UnicodeDecodeError.""" if not params: params = {} template_type, renderer, content = detect_template(content) diff --git a/templates/chrony.conf.debian.tmpl b/templates/chrony.conf.debian.tmpl index e72bfee1..661bf04e 100644 --- a/templates/chrony.conf.debian.tmpl +++ b/templates/chrony.conf.debian.tmpl @@ -30,7 +30,7 @@ logdir /var/log/chrony maxupdateskew 100.0 # This directive enables kernel synchronisation (every 11 minutes) of the -# real-time clock. Note that it can't be used along with the 'rtcfile' directive. +# real-time clock. Note that it can’t be used along with the 'rtcfile' directive. 
rtcsync # Step the system clock instead of slewing it if the adjustment is larger than diff --git a/templates/chrony.conf.ubuntu.tmpl b/templates/chrony.conf.ubuntu.tmpl index da7f16a4..50a6f518 100644 --- a/templates/chrony.conf.ubuntu.tmpl +++ b/templates/chrony.conf.ubuntu.tmpl @@ -34,7 +34,7 @@ logdir /var/log/chrony maxupdateskew 100.0 # This directive enables kernel synchronisation (every 11 minutes) of the -# real-time clock. Note that it can't be used along with the 'rtcfile' directive. +# real-time clock. Note that it can’t be used along with the 'rtcfile' directive. rtcsync # Step the system clock instead of slewing it if the adjustment is larger than diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 1b3ca570..02676aa6 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -272,12 +272,12 @@ class TestNtp(FilesystemMockingTestCase): expected_servers = '\n'.join([ 'server {0} iburst'.format(srv) for srv in servers]) print('distro=%s client=%s' % (distro, client)) - self.assertIn(expected_servers, content.decode(), + self.assertIn(expected_servers, content.decode('utf-8'), ('failed to render {0} conf' ' for distro:{1}'.format(client, distro))) expected_pools = '\n'.join([ 'pool {0} iburst'.format(pool) for pool in pools]) - self.assertIn(expected_pools, content.decode(), + self.assertIn(expected_pools, content.decode('utf-8'), ('failed to render {0} conf' ' for distro:{1}'.format(client, distro))) elif client == 'systemd-timesyncd': diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index 53154d33..1080e135 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -10,6 +10,7 @@ from cloudinit.tests import helpers as test_helpers import textwrap from cloudinit import templater +from cloudinit.util import load_file, write_file try: import Cheetah @@ -19,7 +20,17 @@ except ImportError: HAS_CHEETAH = False -class TestTemplates(test_helpers.TestCase): +class TestTemplates(test_helpers.CiTestCase): + jinja_utf8 = b'It\xe2\x80\x99s not ascii, {{name}}\n' + jinja_utf8_rbob = b'It\xe2\x80\x99s not ascii, bob\n'.decode('utf-8') + + @staticmethod + def add_header(renderer, data): + """Return text (py2 unicode/py3 str) with template header.""" + if isinstance(data, bytes): + data = data.decode('utf-8') + return "## template: %s\n" % renderer + data + def test_render_basic(self): in_data = textwrap.dedent(""" ${b} @@ -106,4 +117,32 @@ $a,$b''' 'codename': codename}) self.assertEqual(ex_data, out_data) + def test_jinja_nonascii_render_to_string(self): + """Test jinja render_to_string with non-ascii content.""" + self.assertEqual( + templater.render_string( + self.add_header("jinja", self.jinja_utf8), {"name": "bob"}), + self.jinja_utf8_rbob) + + def test_jinja_nonascii_render_to_file(self): + """Test jinja render_to_file of a filename with non-ascii content.""" + tmpl_fn = self.tmp_path("j-render-to-file.template") + out_fn = self.tmp_path("j-render-to-file.out") + write_file(filename=tmpl_fn, omode="wb", + content=self.add_header( + "jinja", self.jinja_utf8).encode('utf-8')) + templater.render_to_file(tmpl_fn, out_fn, {"name": "bob"}) + result = load_file(out_fn, decode=False).decode('utf-8') + self.assertEqual(result, self.jinja_utf8_rbob) + + def test_jinja_nonascii_render_from_file(self): + """Test jinja render_from_file with non-ascii content.""" + tmpl_fn = 
self.tmp_path("j-render-from-file.template") + write_file(tmpl_fn, omode="wb", + content=self.add_header( + "jinja", self.jinja_utf8).encode('utf-8')) + result = templater.render_from_file(tmpl_fn, {"name": "bob"}) + self.assertEqual(result, self.jinja_utf8_rbob) + + # vi: ts=4 expandtab -- cgit v1.2.3 From acca826adf39ddfedde78cfbfc47e81a06c6f42a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 18 Apr 2018 03:35:41 -0600 Subject: pycodestyle: Fix invalid escape sequences in string literals. Python has deprecated these invalid string literals now https://bugs.python.org/issue27364 and pycodestyle is identifying them with a W605 warning. https://github.com/PyCQA/pycodestyle/pull/676 So basically, any use of \ not followed by one of [\'"abfnrtv] or \ooo (octal) \xhh (hex) or a newline is invalid. This is most comomnly seen for us in regex. To solve, you either: a.) use a raw string r'...' b.) correctly escape the \ that was not intended to be interpreted. --- cloudinit/analyze/__main__.py | 2 +- cloudinit/config/cc_apt_configure.py | 2 +- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/config/cc_rsyslog.py | 4 ++-- cloudinit/distros/freebsd.py | 6 +++--- cloudinit/util.py | 6 +++--- tests/cloud_tests/testcases/base.py | 2 +- tests/unittests/test_util.py | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) (limited to 'tests') diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 3ba5903f..f8613656 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -69,7 +69,7 @@ def analyze_blame(name, args): """ (infh, outfh) = configure_io(args) blame_format = ' %ds (%n)' - r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE) + r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE) for idx, record in enumerate(show.show_events(_get_events(infh), blame_format)): srecs = sorted(filter(r.match, record), reverse=True) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 5b9cbca0..afaca464 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. 
If ``add_apt_repo_match`` is not specified, it defaults -to ``^[\w-]+:\w`` +to ``^[\\w-]+:\\w`` **Add source list entries:** diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 4da3a588..50b37470 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -74,7 +74,7 @@ def givecmdline(pid): if util.is_FreeBSD(): (output, _err) = util.subp(['procstat', '-c', str(pid)]) line = output.splitlines()[1] - m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line) + m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) return m.group(2) else: return util.load_file("/proc/%s/cmdline" % pid) diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index af08788c..27d2366c 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -203,8 +203,8 @@ LOG = logging.getLogger(__name__) COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') HOST_PORT_RE = re.compile( r'^(?P<proto>[@]{0,2})' - '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' - '([:](?P<port>[0-9]+))?$') + r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' + r'([:](?P<port>[0-9]+))?$') def reload_syslog(command=DEF_RELOAD, systemd=False): diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 754d3df6..099fac5c 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -110,7 +110,7 @@ class Distro(distros.Distro): if dev.startswith('lo'): return dev - n = re.search('\d+$', dev) + n = re.search(r'\d+$', dev) index = n.group(0) (out, err) = util.subp(['ifconfig', '-a']) @@ -118,7 +118,7 @@ class Distro(distros.Distro): if len(x.split()) > 0] bsddev = 'NOT_FOUND' for line in ifconfigoutput: - m = re.match('^\w+', line) + m = re.match(r'^\w+', line) if m: if m.group(0).startswith('lo'): continue @@ -128,7 +128,7 @@ class Distro(distros.Distro): break # Replace the index with the one we're after. - bsddev = re.sub('\d+$', index, bsddev) + bsddev = re.sub(r'\d+$', index, bsddev) LOG.debug("Using network interface %s", bsddev) return bsddev diff --git a/cloudinit/util.py b/cloudinit/util.py index acdc0d85..1717b529 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1446,7 +1446,7 @@ def get_config_logfiles(cfg): for fmt in get_output_cfg(cfg, None): if not fmt: continue - match = re.match('(?P<type>\||>+)\s*(?P<target>.*)', fmt) + match = re.match(r'(?P<type>\||>+)\s*(?P<target>.*)', fmt) if not match: continue target = match.group('target') @@ -2275,8 +2275,8 @@ def parse_mount(path): # the regex is a bit complex. to better understand this regex see: # https://regex101.com/r/2F6c1k/1 # https://regex101.com/r/T2en7a/1 - regex = r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' + \ - '(?=(?:type)[\s]+([\S]+)|\(([^,]*))' + regex = (r'^(/dev/[\S]+|.*zroot\S*?) 
on (/[\S]*) ' + r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))') for line in mount_locs: m = re.search(regex, line) if not m: diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 7598d469..4fda8f91 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -235,7 +235,7 @@ class CloudTestCase(unittest.TestCase): 'found unexpected kvm availability-zone %s' % v1_data['availability-zone']) self.assertIsNotNone( - re.match('[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}', + re.match(r'[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}', v1_data['instance-id']), 'kvm instance-id is not a UUID: %s' % v1_data['instance-id']) self.assertIn('ubuntu', v1_data['local-hostname']) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 50101906..7cbd5538 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -800,7 +800,7 @@ class TestSubp(helpers.CiTestCase): os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) self.assertRaisesRegex(util.ProcessExecutionError, - 'Missing #! in script\?', + r'Missing #! in script\?', util.subp, (noshebang,)) def test_returns_none_if_no_capture(self): -- cgit v1.2.3 From 4c573d0e0173d2b1e99a383c54a0a6c957aa1cbb Mon Sep 17 00:00:00 2001 From: Mike Gerdts Date: Wed, 18 Apr 2018 13:55:17 -0400 Subject: DataSourceSmartOS: fix hang when metadata service is down If the metadata service in the host is down while a guest that uses DataSourceSmartOS is booting, the request from the guest falls into the bit bucket. When the metadata service is eventually started, the guest has no awareness of this and does not resend the request. This results in cloud-init hanging forever with a guest reboot as the only recovery option. This fix updates the metadata protocol to implement the initialization phase, just as is implemented by mdata-get and related utilities. The initialization phase includes draining all pending data from the serial port, writing an empty command and getting an expected error message in reply. If the initialization phase times out, it is retried every five seconds. Each timeout results in a warning message: "Timeout while initializing metadata client. Is the host metadata service running?" By default, warning messages are logged to the console, thus the reason for a hung boot is readily apparent. LP: #1667735 --- cloudinit/sources/DataSourceSmartOS.py | 117 +++++++++++++++++++++--- tests/unittests/test_datasource/test_smartos.py | 103 ++++++++++++++++++++- 2 files changed, 204 insertions(+), 16 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 5dd8a125..c8998b40 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -1,4 +1,5 @@ # Copyright (C) 2013 Canonical Ltd. +# Copyright (c) 2018, Joyent, Inc. 
# # Author: Ben Howard # @@ -21,6 +22,7 @@ import base64 import binascii +import errno import json import os import random @@ -229,6 +231,9 @@ class DataSourceSmartOS(sources.DataSource): self.md_client) return False + # Open once for many requests, rather than once for each request + self.md_client.open_transport() + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): smartos_noun, strip = attribute md[ci_noun] = self.md_client.get(smartos_noun, strip=strip) @@ -236,6 +241,8 @@ class DataSourceSmartOS(sources.DataSource): for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items(): md[ci_noun] = self.md_client.get_json(smartos_noun) + self.md_client.close_transport() + # @datadictionary: This key may contain a program that is written # to a file in the filesystem of the guest on each boot and then # executed. It may be of any format that would be considered @@ -316,6 +323,10 @@ class JoyentMetadataFetchException(Exception): pass +class JoyentMetadataTimeoutException(JoyentMetadataFetchException): + pass + + class JoyentMetadataClient(object): """ A client implementing v2 of the Joyent Metadata Protocol Specification. @@ -360,6 +371,47 @@ class JoyentMetadataClient(object): LOG.debug('Value "%s" found.', value) return value + def _readline(self): + """ + Reads a line a byte at a time until \n is encountered. Returns an + ascii string with the trailing newline removed. + + If a timeout (per-byte) is set and it expires, a + JoyentMetadataFetchException will be thrown. + """ + response = [] + + def as_ascii(): + return b''.join(response).decode('ascii') + + msg = "Partial response: '%s'" + while True: + try: + byte = self.fp.read(1) + if len(byte) == 0: + raise JoyentMetadataTimeoutException(msg % as_ascii()) + if byte == b'\n': + return as_ascii() + response.append(byte) + except OSError as exc: + if exc.errno == errno.EAGAIN: + raise JoyentMetadataTimeoutException(msg % as_ascii()) + raise + + def _write(self, msg): + self.fp.write(msg.encode('ascii')) + self.fp.flush() + + def _negotiate(self): + LOG.debug('Negotiating protocol V2') + self._write('NEGOTIATE V2\n') + response = self._readline() + LOG.debug('read "%s"', response) + if response != 'V2_OK': + raise JoyentMetadataFetchException( + 'Invalid response "%s" to "NEGOTIATE V2"' % response) + LOG.debug('Negotiation complete') + def request(self, rtype, param=None): request_id = '{0:08x}'.format(random.randint(0, 0xffffffff)) message_body = ' '.join((request_id, rtype,)) @@ -374,18 +426,11 @@ class JoyentMetadataClient(object): self.open_transport() need_close = True - self.fp.write(msg.encode('ascii')) - self.fp.flush() - - response = bytearray() - response.extend(self.fp.read(1)) - while response[-1:] != b'\n': - response.extend(self.fp.read(1)) - + self._write(msg) + response = self._readline() if need_close: self.close_transport() - response = response.rstrip().decode('ascii') LOG.debug('Read "%s" from metadata transport.', response) if 'SUCCESS' not in response: @@ -450,6 +495,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient): sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(self.socketpath) self.fp = sock.makefile('rwb') + self._negotiate() def exists(self): return os.path.exists(self.socketpath) @@ -459,8 +505,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient): class JoyentMetadataSerialClient(JoyentMetadataClient): - def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM): - super(JoyentMetadataSerialClient, self).__init__(smartos_type) + def __init__(self, device, timeout=10, 
smartos_type=SMARTOS_ENV_KVM, + fp=None): + super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp) self.device = device self.timeout = timeout @@ -468,10 +515,50 @@ class JoyentMetadataSerialClient(JoyentMetadataClient): return os.path.exists(self.device) def open_transport(self): - ser = serial.Serial(self.device, timeout=self.timeout) - if not ser.isOpen(): - raise SystemError("Unable to open %s" % self.device) - self.fp = ser + if self.fp is None: + ser = serial.Serial(self.device, timeout=self.timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % self.device) + self.fp = ser + self._flush() + self._negotiate() + + def _flush(self): + LOG.debug('Flushing input') + # Read any pending data + timeout = self.fp.timeout + self.fp.timeout = 0.1 + while True: + try: + self._readline() + except JoyentMetadataTimeoutException: + break + LOG.debug('Input empty') + + # Send a newline and expect "invalid command". Keep trying until + # successful. Retry rather frequently so that the "Is the host + # metadata service running" appears on the console soon after someone + # attaches in an effort to debug. + if timeout > 5: + self.fp.timeout = 5 + else: + self.fp.timeout = timeout + while True: + LOG.debug('Writing newline, expecting "invalid command"') + self._write('\n') + try: + response = self._readline() + if response == 'invalid command': + break + if response == 'FAILURE': + LOG.debug('Got "FAILURE". Retrying.') + continue + LOG.warning('Unexpected response "%s" during flush', response) + except JoyentMetadataTimeoutException: + LOG.warning('Timeout while initializing metadata client. ' + + 'Is the host metadata service running?') + LOG.debug('Got "invalid command". Flush complete.') + self.fp.timeout = timeout def __repr__(self): return "%s(device=%s, timeout=%s)" % ( diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 88bae5f9..2bea7a17 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -1,4 +1,5 @@ # Copyright (C) 2013 Canonical Ltd. +# Copyright (c) 2018, Joyent, Inc. # # Author: Ben Howard # @@ -324,6 +325,7 @@ class PsuedoJoyentClient(object): if data is None: data = MOCK_RETURNS.copy() self.data = data + self._is_open = False return def get(self, key, default=None, strip=False): @@ -344,6 +346,14 @@ class PsuedoJoyentClient(object): def exists(self): return True + def open_transport(self): + assert(not self._is_open) + self._is_open = True + + def close_transport(self): + assert(self._is_open) + self._is_open = False + class TestSmartOSDataSource(FilesystemMockingTestCase): def setUp(self): @@ -592,8 +602,46 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): mydscfg['disk_aliases']['FOO']) +class ShortReader(object): + """Implements a 'read' interface for bytes provided. + much like io.BytesIO but the 'endbyte' acts as if EOF. 
+ When it is reached, a short read will be returned.""" + def __init__(self, initial_bytes, endbyte=b'\0'): + self.data = initial_bytes + self.index = 0 + self.len = len(self.data) + self.endbyte = endbyte + + @property + def emptied(self): + return self.index >= self.len + + def read(self, size=-1): + """Read size bytes but not past a null.""" + if size == 0 or self.index >= self.len: + return b'' + + rsize = size + if size < 0 or size + self.index > self.len: + rsize = self.len - self.index + + next_null = self.data.find(self.endbyte, self.index, rsize) + if next_null >= 0: + rsize = next_null - self.index + 1 + i = self.index + self.index += rsize + ret = self.data[i:i + rsize] + if len(ret) and ret[-1:] == self.endbyte: + ret = ret[:-1] + return ret + + class TestJoyentMetadataClient(FilesystemMockingTestCase): + invalid = b'invalid command\n' + failure = b'FAILURE\n' + v2_ok = b'V2_OK\n' + def setUp(self): super(TestJoyentMetadataClient, self).setUp() @@ -636,6 +684,11 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): return DataSourceSmartOS.JoyentMetadataClient( fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM) + def _get_serial_client(self): + self.serial.timeout = 1 + return DataSourceSmartOS.JoyentMetadataSerialClient(None, + fp=self.serial) + def assertEndsWith(self, haystack, prefix): self.assertTrue(haystack.endswith(prefix), "{0} does not end with '{1}'".format( @@ -646,12 +699,14 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): "{0} does not start with '{1}'".format( repr(haystack), prefix)) + def assertNoMoreSideEffects(self, obj): + self.assertRaises(StopIteration, obj) + def test_get_metadata_writes_a_single_line(self): client = self._get_client() client.get('some_key') self.assertEqual(1, self.serial.write.call_count) written_line = self.serial.write.call_args[0][0] - print(type(written_line)) self.assertEndsWith(written_line.decode('ascii'), b'\n'.decode('ascii')) self.assertEqual(1, written_line.count(b'\n')) @@ -737,6 +792,52 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): client._checksum = lambda _: self.response_parts['crc'] self.assertIsNone(client.get('some_key')) + def test_negotiate(self): + client = self._get_client() + reader = ShortReader(self.v2_ok) + client.fp.read.side_effect = reader.read + client._negotiate() + self.assertTrue(reader.emptied) + + def test_negotiate_short_response(self): + client = self._get_client() + # chopped '\n' from v2_ok. 
+ reader = ShortReader(self.v2_ok[:-1] + b'\0') + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException, + client._negotiate) + self.assertTrue(reader.emptied) + + def test_negotiate_bad_response(self): + client = self._get_client() + reader = ShortReader(b'garbage\n' + self.v2_ok) + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client._negotiate) + self.assertEqual(self.v2_ok, client.fp.read()) + + def test_serial_open_transport(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_failure(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage' + b'\0' + self.failure + + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_many_timeouts(self): + client = self._get_serial_client() + reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + class TestNetworkConversion(TestCase): def test_convert_simple(self): -- cgit v1.2.3 From 6d48d265a0548a2dc23e587f2a335d4e38e8db90 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 18 Apr 2018 15:22:42 -0600 Subject: net: Depend on iproute2's ip instead of net-tools ifconfig or route MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The net-tools package is deprecated and will eventually be dropped. Use "ip route", "link" or "address" instead of "ifconfig" or "route" calls. Cloud-init can now run in an environment that no longer has net-tools. This affects the network and route printing emitted to cloud-config-output.log as well as the cc_disable_ec2_metadata module. 
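For illustration, the probe-and-fallback pattern this change applies throughout is roughly the sketch below (a minimal sketch, not the patch itself; it reuses cloud-init's existing util.which and util.subp helpers, and the function name here is hypothetical):

    from cloudinit import util

    def _get_route_output():
        """Prefer iproute2's 'ip'; fall back to net-tools; else fail loudly."""
        if util.which('ip'):
            # iproute2 is present; one route per output line is easy to parse
            out, _err = util.subp(['ip', '-o', 'route', 'list'])
        elif util.which('netstat'):
            # legacy net-tools fallback; netstat may exit 1 harmlessly
            out, _err = util.subp(
                ['netstat', '--route', '--numeric'], rcs=[0, 1])
        else:
            raise RuntimeError("neither 'ip' nor 'netstat' is available")
        return out
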
Additional changes:  - separate readResource and resourceLocation into standalone test    functions  - Fix ipv4 address rows to report scopes represented by ip addr show  - Formatted route/address output now handles multiple ipv4 and ipv6    addresses on a single interface Co-authored-by: James Hogarth Co-authored-by: Robert Schweikert --- cloudinit/config/cc_disable_ec2_metadata.py | 14 +- .../config/tests/test_disable_ec2_metadata.py | 50 +++ cloudinit/net/network_state.py | 11 +- cloudinit/netinfo.py | 345 ++++++++++++++++----- cloudinit/tests/helpers.py | 40 +-- cloudinit/tests/test_netinfo.py | 186 ++++++----- tests/data/netinfo/netdev-formatted-output | 10 + tests/data/netinfo/new-ifconfig-output | 18 ++ tests/data/netinfo/old-ifconfig-output | 18 ++ tests/data/netinfo/route-formatted-output | 22 ++ tests/data/netinfo/sample-ipaddrshow-output | 13 + tests/data/netinfo/sample-iproute-output-v4 | 3 + tests/data/netinfo/sample-iproute-output-v6 | 11 + tests/data/netinfo/sample-route-output-v4 | 5 + tests/data/netinfo/sample-route-output-v6 | 13 + tests/unittests/test_filters/test_launch_index.py | 10 +- tests/unittests/test_merging.py | 2 +- tests/unittests/test_runs/test_merge_run.py | 2 +- tests/unittests/test_util.py | 14 +- 19 files changed, 583 insertions(+), 204 deletions(-) create mode 100644 cloudinit/config/tests/test_disable_ec2_metadata.py create mode 100644 tests/data/netinfo/netdev-formatted-output create mode 100644 tests/data/netinfo/new-ifconfig-output create mode 100644 tests/data/netinfo/old-ifconfig-output create mode 100644 tests/data/netinfo/route-formatted-output create mode 100644 tests/data/netinfo/sample-ipaddrshow-output create mode 100644 tests/data/netinfo/sample-iproute-output-v4 create mode 100644 tests/data/netinfo/sample-iproute-output-v6 create mode 100644 tests/data/netinfo/sample-route-output-v4 create mode 100644 tests/data/netinfo/sample-route-output-v6 (limited to 'tests') diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index c56319b5..885b3138 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -32,13 +32,23 @@ from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject'] +REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject'] +REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254'] def handle(name, cfg, _cloud, log, _args): disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) if disabled: - util.subp(REJECT_CMD, capture=False) + reject_cmd = None + if util.which('ip'): + reject_cmd = REJECT_CMD_IP + elif util.which('ifconfig'): + reject_cmd = REJECT_CMD_IF + else: + log.error(('Neither "route" nor "ip" command found, unable to ' + 'manipulate routing table')) + return + util.subp(reject_cmd, capture=False) else: log.debug(("Skipping module named %s," " disabling the ec2 route not enabled"), name) diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py new file mode 100644 index 00000000..67646b03 --- /dev/null +++ b/cloudinit/config/tests/test_disable_ec2_metadata.py @@ -0,0 +1,50 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Tests cc_disable_ec2_metadata handler""" + +import cloudinit.config.cc_disable_ec2_metadata as ec2_meta + +from cloudinit.tests.helpers import CiTestCase, mock + +import logging + +LOG = logging.getLogger(__name__) + +DISABLE_CFG = {'disable_ec2_metadata': 'true'} + + +class TestEC2MetadataRoute(CiTestCase): + + with_logs = True + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + def test_disable_ifconfig(self, m_subp, m_which): + """Set the route if ifconfig command is available""" + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_subp.assert_called_with( + ['route', 'add', '-host', '169.254.169.254', 'reject'], + capture=False) + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + def test_disable_ip(self, m_subp, m_which): + """Set the route if ip command is available""" + m_which.side_effect = lambda x: x if x == 'ip' else None + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + m_subp.assert_called_with( + ['ip', 'route', 'add', 'prohibit', '169.254.169.254'], + capture=False) + + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which') + @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp') + def test_disable_no_tool(self, m_subp, m_which): + """Log error when neither route nor ip commands are available""" + m_which.return_value = None # Find neither ifconfig nor ip + ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None) + self.assertEqual( + [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list) + m_subp.assert_not_called() + +# vi: ts=4 expandtab diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 6d63e5c5..72c803eb 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -7,6 +7,8 @@ import copy import functools import logging +import socket +import struct import six @@ -886,12 +888,9 @@ def net_prefix_to_ipv4_mask(prefix): This is the inverse of ipv4_mask_to_net_prefix. 24 -> "255.255.255.0" Also supports input as a string.""" - - mask = [0, 0, 0, 0] - for i in list(range(0, int(prefix))): - idx = int(i / 8) - mask[idx] = mask[idx] + (1 << (7 - i % 8)) - return ".".join([str(x) for x in mask]) + mask = socket.inet_ntoa( + struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff))) + return mask def ipv4_mask_to_net_prefix(mask): diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 993b26cf..f0906160 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -8,9 +8,11 @@ # # This file is part of cloud-init. See LICENSE file for license information. +from copy import copy, deepcopy import re from cloudinit import log as logging +from cloudinit.net.network_state import net_prefix_to_ipv4_mask from cloudinit import util from cloudinit.simpletable import SimpleTable @@ -18,18 +20,90 @@ from cloudinit.simpletable import SimpleTable LOG = logging.getLogger() -def netdev_info(empty=""): - fields = ("hwaddr", "addr", "bcast", "mask") - (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1]) +DEFAULT_NETDEV_INFO = { + "ipv4": [], + "ipv6": [], + "hwaddr": "", + "up": False +} + + +def _netdev_info_iproute(ipaddr_out): + """ + Get network device dicts from ip route and ip link info. + + @param ipaddr_out: Output string from 'ip addr show' command. 
+ + @returns: A dict of device info keyed by network device name containing + device configuration values. + @raise: TypeError if ipaddr_out isn't a string. + """ devs = {} - for line in str(ifcfg_out).splitlines(): + dev_name = None + for num, line in enumerate(ipaddr_out.splitlines()): + m = re.match(r'^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*', line) + if m: + dev_name = m.group('dev').lower().split('@')[0] + flags = m.group('flags').split(',') + devs[dev_name] = { + 'ipv4': [], 'ipv6': [], 'hwaddr': '', + 'up': bool('UP' in flags and 'LOWER_UP' in flags), + } + elif 'inet6' in line: + m = re.match( + r'\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*', line) + if not m: + LOG.warning( + 'Could not parse ip addr show: (line:%d) %s', num, line) + continue + devs[dev_name]['ipv6'].append(m.groupdict()) + elif 'inet' in line: + m = re.match( + r'\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s' + r'(?P<scope>\S+).*', line) + if not m: + LOG.warning( + 'Could not parse ip addr show: (line:%d) %s', num, line) + continue + match = m.groupdict() + cidr4 = match.pop('cidr4') + addr, _, prefix = cidr4.partition('/') + if not prefix: + prefix = '32' + devs[dev_name]['ipv4'].append({ + 'ip': addr, + 'bcast': match['bcast'] if match['bcast'] else '', + 'mask': net_prefix_to_ipv4_mask(prefix), + 'scope': match['scope']}) + elif 'link' in line: + m = re.match( + r'\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*', line) + if not m: + LOG.warning( + 'Could not parse ip addr show: (line:%d) %s', num, line) + continue + if m.group('link_type') == 'ether': + devs[dev_name]['hwaddr'] = m.group('hwaddr') + else: + devs[dev_name]['hwaddr'] = '' + else: + continue + return devs + + +def _netdev_info_ifconfig(ifconfig_data): + # fields that need to be returned in devs for each dev + devs = {} + for line in ifconfig_data.splitlines(): if len(line) == 0: continue if line[0] not in ("\t", " "): curdev = line.split()[0] - devs[curdev] = {"up": False} - for field in fields: - devs[curdev][field] = "" + # current ifconfig pops a ':' on the end of the device + if curdev.endswith(':'): + curdev = curdev[:-1] + if curdev not in devs: + devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO) toks = line.lower().strip().split() if toks[0] == "up": devs[curdev]['up'] = True @@ -39,41 +113,50 @@ def netdev_info(empty=""): if re.search(r"flags=\d+", toks[i + 1]) + if res: + devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) + return devs + + +def netdev_info(empty=""): + devs = {} + if util.which('ip'): + # Try iproute first of all + (ipaddr_out, _err) = util.subp(["ip", "addr", "show"]) + devs = _netdev_info_iproute(ipaddr_out) + elif util.which('ifconfig'): + # Fall back to net-tools if iproute2 is not present + (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1]) + devs = _netdev_info_ifconfig(ifcfg_out) + else: + LOG.warning( + "Could not print networks: missing 'ip' and 'ifconfig' commands") if empty != "": for (_devname, dev) in devs.items(): @@ -84,14 +167,94 @@ def netdev_info(empty=""): return devs -def route_info(): - (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1]) +def _netdev_route_info_iproute(iproute_data): + """ + Get network route dicts from ip route info. + + @param iproute_data: Output string from ip route command. + + @returns: A dict containing ipv4 and ipv6 route entries as lists. Each + item in the list is a route dictionary representing destination, + gateway, flags, genmask and interface information. 
+ """ + + routes = {} + routes['ipv4'] = [] + routes['ipv6'] = [] + entries = iproute_data.splitlines() + default_route_entry = { + 'destination': '', 'flags': '', 'gateway': '', 'genmask': '', + 'iface': '', 'metric': ''} + for line in entries: + entry = copy(default_route_entry) + if not line: + continue + toks = line.split() + flags = ['U'] + if toks[0] == "default": + entry['destination'] = "0.0.0.0" + entry['genmask'] = "0.0.0.0" + else: + if '/' in toks[0]: + (addr, cidr) = toks[0].split("/") + else: + addr = toks[0] + cidr = '32' + flags.append("H") + entry['genmask'] = net_prefix_to_ipv4_mask(cidr) + entry['destination'] = addr + entry['genmask'] = net_prefix_to_ipv4_mask(cidr) + entry['gateway'] = "0.0.0.0" + for i in range(len(toks)): + if toks[i] == "via": + entry['gateway'] = toks[i + 1] + flags.insert(1, "G") + if toks[i] == "dev": + entry["iface"] = toks[i + 1] + if toks[i] == "metric": + entry['metric'] = toks[i + 1] + entry['flags'] = ''.join(flags) + routes['ipv4'].append(entry) + try: + (iproute_data6, _err6) = util.subp( + ["ip", "--oneline", "-6", "route", "list", "table", "all"], + rcs=[0, 1]) + except util.ProcessExecutionError: + pass + else: + entries6 = iproute_data6.splitlines() + for line in entries6: + entry = {} + if not line: + continue + toks = line.split() + if toks[0] == "default": + entry['destination'] = "::/0" + entry['flags'] = "UG" + else: + entry['destination'] = toks[0] + entry['gateway'] = "::" + entry['flags'] = "U" + for i in range(len(toks)): + if toks[i] == "via": + entry['gateway'] = toks[i + 1] + entry['flags'] = "UG" + if toks[i] == "dev": + entry["iface"] = toks[i + 1] + if toks[i] == "metric": + entry['metric'] = toks[i + 1] + if toks[i] == "expires": + entry['flags'] = entry['flags'] + 'e' + routes['ipv6'].append(entry) + return routes + +def _netdev_route_info_netstat(route_data): routes = {} routes['ipv4'] = [] routes['ipv6'] = [] - entries = route_out.splitlines()[1:] + entries = route_data.splitlines() for line in entries: if not line: continue @@ -101,8 +264,8 @@ def route_info(): # default 10.65.0.1 UGS 0 34920 vtnet0 # # Linux netstat shows 2 more: - # Destination Gateway Genmask Flags MSS Window irtt Iface - # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0 + # Destination Gateway Genmask Flags Metric Ref Use Iface + # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0 if (len(toks) < 6 or toks[0] == "Kernel" or toks[0] == "Destination" or toks[0] == "Internet" or toks[0] == "Internet6" or toks[0] == "Routing"): @@ -125,31 +288,57 @@ def route_info(): routes['ipv4'].append(entry) try: - (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"], - rcs=[0, 1]) + (route_data6, _err6) = util.subp( + ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1]) except util.ProcessExecutionError: pass else: - entries6 = route_out6.splitlines()[1:] + entries6 = route_data6.splitlines() for line in entries6: if not line: continue toks = line.split() - if (len(toks) < 6 or toks[0] == "Kernel" or + if (len(toks) < 7 or toks[0] == "Kernel" or + toks[0] == "Destination" or toks[0] == "Internet" or toks[0] == "Proto" or toks[0] == "Active"): continue entry = { - 'proto': toks[0], - 'recv-q': toks[1], - 'send-q': toks[2], - 'local address': toks[3], - 'foreign address': toks[4], - 'state': toks[5], + 'destination': toks[0], + 'gateway': toks[1], + 'flags': toks[2], + 'metric': toks[3], + 'ref': toks[4], + 'use': toks[5], + 'iface': toks[6], } + # skip lo interface on ipv6 + if entry['iface'] == "lo": + continue + # strip /128 from address if it's included 
+ if entry['destination'].endswith('/128'): + entry['destination'] = re.sub( + r'\/128$', '', entry['destination']) routes['ipv6'].append(entry) return routes +def route_info(): + routes = {} + if util.which('ip'): + # Try iproute first of all + (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"]) + routes = _netdev_route_info_iproute(iproute_out) + elif util.which('netstat'): + # Fall back to net-tools if iproute2 is not present + (route_out, _err) = util.subp( + ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1]) + routes = _netdev_route_info_netstat(route_out) + else: + LOG.warning( + "Could not print routes: missing 'ip' and 'netstat' commands") + return routes + + def getgateway(): try: routes = route_info() @@ -166,21 +355,30 @@ def netdev_pformat(): lines = [] try: netdev = netdev_info(empty=".") - except Exception: - lines.append(util.center("Net device info failed", '!', 80)) + except Exception as e: + lines.append( + util.center( + "Net device info failed ({error})".format(error=str(e)), + '!', 80)) else: + if not netdev: + return '\n' fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address'] tbl = SimpleTable(fields) - for (dev, d) in sorted(netdev.items()): - tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]]) - if d.get('addr6'): - tbl.add_row([dev, d["up"], - d["addr6"], ".", d.get("scope6"), d["hwaddr"]]) + for (dev, data) in sorted(netdev.items()): + for addr in data.get('ipv4'): + tbl.add_row( + [dev, data["up"], addr["ip"], addr["mask"], + addr.get('scope', '.'), data["hwaddr"]]) + for addr in data.get('ipv6'): + tbl.add_row( + [dev, data["up"], addr["ip"], ".", addr["scope6"], + data["hwaddr"]]) netdev_s = tbl.get_string() max_len = len(max(netdev_s.splitlines(), key=len)) header = util.center("Net device info", "+", max_len) lines.extend([header, netdev_s]) - return "\n".join(lines) + return "\n".join(lines) + "\n" def route_pformat(): @@ -188,7 +386,10 @@ def route_pformat(): try: routes = route_info() except Exception as e: - lines.append(util.center('Route info failed', '!', 80)) + lines.append( + util.center( + 'Route info failed ({error})'.format(error=str(e)), + '!', 80)) util.logexc(LOG, "Route info failed: %s" % e) else: if routes.get('ipv4'): @@ -205,20 +406,20 @@ def route_pformat(): header = util.center("Route IPv4 info", "+", max_len) lines.extend([header, route_s]) if routes.get('ipv6'): - fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q', - 'Local Address', 'Foreign Address', 'State'] + fields_v6 = ['Route', 'Destination', 'Gateway', 'Interface', + 'Flags'] tbl_v6 = SimpleTable(fields_v6) for (n, r) in enumerate(routes.get('ipv6')): route_id = str(n) - tbl_v6.add_row([route_id, r['proto'], - r['recv-q'], r['send-q'], - r['local address'], r['foreign address'], - r['state']]) + if r['iface'] == 'lo': + continue + tbl_v6.add_row([route_id, r['destination'], + r['gateway'], r['iface'], r['flags']]) route_s = tbl_v6.get_string() max_len = len(max(route_s.splitlines(), key=len)) header = util.center("Route IPv6 info", "+", max_len) lines.extend([header, route_s]) - return "\n".join(lines) + return "\n".join(lines) + "\n" def debug_info(prefix='ci-info: '): diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 999b1d7c..82fd347b 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -190,35 +190,11 @@ class ResourceUsingTestCase(CiTestCase): super(ResourceUsingTestCase, self).setUp() self.resource_path = None - def resourceLocation(self, subname=None): - if self.resource_path 
is None: - paths = [ - os.path.join('tests', 'data'), - os.path.join('data'), - os.path.join(os.pardir, 'tests', 'data'), - os.path.join(os.pardir, 'data'), - ] - for p in paths: - if os.path.isdir(p): - self.resource_path = p - break - self.assertTrue((self.resource_path and - os.path.isdir(self.resource_path)), - msg="Unable to locate test resource data path!") - if not subname: - return self.resource_path - return os.path.join(self.resource_path, subname) - - def readResource(self, name): - where = self.resourceLocation(name) - with open(where, 'r') as fh: - return fh.read() - def getCloudPaths(self, ds=None): tmpdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, tmpdir) cp = ch.Paths({'cloud_dir': tmpdir, - 'templates_dir': self.resourceLocation()}, + 'templates_dir': resourceLocation()}, ds=ds) return cp @@ -234,7 +210,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): ResourceUsingTestCase.tearDown(self) def replicateTestRoot(self, example_root, target_root): - real_root = self.resourceLocation() + real_root = resourceLocation() real_root = os.path.join(real_root, 'roots', example_root) for (dir_path, _dirnames, filenames) in os.walk(real_root): real_path = dir_path @@ -399,6 +375,18 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs): p.stop() +def resourceLocation(subname=None): + path = os.path.join('tests', 'data') + if not subname: + return path + return os.path.join(path, subname) + + +def readResource(name, mode='r'): + with open(resourceLocation(name), mode) as fh: + return fh.read() + + try: skipIf = unittest.skipIf except AttributeError: diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py index 7dea2e41..2537c1c2 100644 --- a/cloudinit/tests/test_netinfo.py +++ b/cloudinit/tests/test_netinfo.py @@ -2,105 +2,121 @@ """Tests netinfo module functions and classes.""" +from copy import copy + from cloudinit.netinfo import netdev_pformat, route_pformat -from cloudinit.tests.helpers import CiTestCase, mock +from cloudinit.tests.helpers import CiTestCase, mock, readResource # Example ifconfig and route output -SAMPLE_IFCONFIG_OUT = """\ -enp0s25 Link encap:Ethernet HWaddr 50:7b:9d:2c:af:91 - inet addr:192.168.2.18 Bcast:192.168.2.255 Mask:255.255.255.0 - inet6 addr: fe80::8107:2b92:867e:f8a6/64 Scope:Link - UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 - RX packets:8106427 errors:55 dropped:0 overruns:0 frame:37 - TX packets:9339739 errors:0 dropped:0 overruns:0 carrier:0 - collisions:0 txqueuelen:1000 - RX bytes:4953721719 (4.9 GB) TX bytes:7731890194 (7.7 GB) - Interrupt:20 Memory:e1200000-e1220000 - -lo Link encap:Local Loopback - inet addr:127.0.0.1 Mask:255.0.0.0 - inet6 addr: ::1/128 Scope:Host - UP LOOPBACK RUNNING MTU:65536 Metric:1 - RX packets:579230851 errors:0 dropped:0 overruns:0 frame:0 - TX packets:579230851 errors:0 dropped:0 overruns:0 carrier:0 - collisions:0 txqueuelen:1 -""" - -SAMPLE_ROUTE_OUT = '\n'.join([ - '0.0.0.0 192.168.2.1 0.0.0.0 UG 0 0 0' - ' enp0s25', - '0.0.0.0 192.168.2.1 0.0.0.0 UG 0 0 0' - ' wlp3s0', - '192.168.2.0 0.0.0.0 255.255.255.0 U 0 0 0' - ' enp0s25']) - - -NETDEV_FORMATTED_OUT = '\n'.join([ - '+++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++' - '++++++++++++++++++++', - '+---------+------+------------------------------+---------------+-------+' - '-------------------+', - '| Device | Up | Address | Mask | Scope |' - ' Hw-Address |', - '+---------+------+------------------------------+---------------+-------+' - '-------------------+', - '| enp0s25 | True | 
192.168.2.18 | 255.255.255.0 | . |' - ' 50:7b:9d:2c:af:91 |', - '| enp0s25 | True | fe80::8107:2b92:867e:f8a6/64 | . | link |' - ' 50:7b:9d:2c:af:91 |', - '| lo | True | 127.0.0.1 | 255.0.0.0 | . |' - ' . |', - '| lo | True | ::1/128 | . | host |' - ' . |', - '+---------+------+------------------------------+---------------+-------+' - '-------------------+']) - -ROUTE_FORMATTED_OUT = '\n'.join([ - '+++++++++++++++++++++++++++++Route IPv4 info++++++++++++++++++++++++++' - '+++', - '+-------+-------------+-------------+---------------+-----------+-----' - '--+', - '| Route | Destination | Gateway | Genmask | Interface | Flags' - ' |', - '+-------+-------------+-------------+---------------+-----------+' - '-------+', - '| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | wlp3s0 |' - ' UG |', - '| 1 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | enp0s25 |' - ' U |', - '+-------+-------------+-------------+---------------+-----------+' - '-------+', - '++++++++++++++++++++++++++++++++++++++++Route IPv6 info++++++++++' - '++++++++++++++++++++++++++++++', - '+-------+-------------+-------------+---------------+---------------+' - '-----------------+-------+', - '| Route | Proto | Recv-Q | Send-Q | Local Address |' - ' Foreign Address | State |', - '+-------+-------------+-------------+---------------+---------------+' - '-----------------+-------+', - '| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | UG |' - ' 0 | 0 |', - '| 1 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | U |' - ' 0 | 0 |', - '+-------+-------------+-------------+---------------+---------------+' - '-----------------+-------+']) +SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") +SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") +SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") +SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") +SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") +SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") +SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") +NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") +ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") class TestNetInfo(CiTestCase): maxDiff = None + with_logs = True + + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_old_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering old nettools info.""" + m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + self.assertEqual(NETDEV_FORMATTED_OUT, content) + @mock.patch('cloudinit.netinfo.util.which') @mock.patch('cloudinit.netinfo.util.subp') - def test_netdev_pformat(self, m_subp): - """netdev_pformat properly rendering network device information.""" - m_subp.return_value = (SAMPLE_IFCONFIG_OUT, '') + def test_netdev_new_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None content = netdev_pformat() self.assertEqual(NETDEV_FORMATTED_OUT, content) + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_iproute_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering ip route info.""" + m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '') + 
m_which.side_effect = lambda x: x if x == 'ip' else None
+        content = netdev_pformat()
+        new_output = copy(NETDEV_FORMATTED_OUT)
+        # ip route show describes global scopes on ipv4 addresses
+        # whereas ifconfig does not. Add proper global/host scope to output.
+        new_output = new_output.replace('| . | 50:7b', '| global | 50:7b')
+        new_output = new_output.replace(
+            '255.0.0.0 | . |', '255.0.0.0 | host |')
+        self.assertEqual(new_output, content)
+
+    @mock.patch('cloudinit.netinfo.util.which')
+    @mock.patch('cloudinit.netinfo.util.subp')
+    def test_netdev_warn_on_missing_commands(self, m_subp, m_which):
+        """netdev_pformat warns when missing both 'ip' and 'ifconfig'."""
+        m_which.return_value = None  # Neither ip nor ifconfig found
+        content = netdev_pformat()
+        self.assertEqual('\n', content)
+        self.assertEqual(
+            "WARNING: Could not print networks: missing 'ip' and 'ifconfig'"
+            " commands\n",
+            self.logs.getvalue())
+        m_subp.assert_not_called()
+
+    @mock.patch('cloudinit.netinfo.util.which')
     @mock.patch('cloudinit.netinfo.util.subp')
-    def test_route_pformat(self, m_subp):
-        """netdev_pformat properly rendering network device information."""
-        m_subp.return_value = (SAMPLE_ROUTE_OUT, '')
+    def test_route_nettools_pformat(self, m_subp, m_which):
+        """route_pformat properly rendering nettools route info."""
+
+        def subp_netstat_route_selector(*args, **kwargs):
+            if args[0] == ['netstat', '--route', '--numeric', '--extend']:
+                return (SAMPLE_ROUTE_OUT_V4, '')
+            if args[0] == ['netstat', '-A', 'inet6', '--route', '--numeric']:
+                return (SAMPLE_ROUTE_OUT_V6, '')
+            raise Exception('Unexpected subp call %s' % args[0])
+
+        m_subp.side_effect = subp_netstat_route_selector
+        m_which.side_effect = lambda x: x if x == 'netstat' else None
         content = route_pformat()
         self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+    @mock.patch('cloudinit.netinfo.util.which')
+    @mock.patch('cloudinit.netinfo.util.subp')
+    def test_route_iproute_pformat(self, m_subp, m_which):
+        """route_pformat properly rendering ip route info."""
+
+        def subp_iproute_selector(*args, **kwargs):
+            if ['ip', '-o', 'route', 'list'] == args[0]:
+                return (SAMPLE_IPROUTE_OUT_V4, '')
+            v6cmd = ['ip', '--oneline', '-6', 'route', 'list', 'table', 'all']
+            if v6cmd == args[0]:
+                return (SAMPLE_IPROUTE_OUT_V6, '')
+            raise Exception('Unexpected subp call %s' % args[0])
+
+        m_subp.side_effect = subp_iproute_selector
+        m_which.side_effect = lambda x: x if x == 'ip' else None
+        content = route_pformat()
+        self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+    @mock.patch('cloudinit.netinfo.util.which')
+    @mock.patch('cloudinit.netinfo.util.subp')
+    def test_route_warn_on_missing_commands(self, m_subp, m_which):
+        """route_pformat warns when missing both 'ip' and 'netstat'."""
+        m_which.return_value = None  # Neither ip nor netstat found
+        content = route_pformat()
+        self.assertEqual('\n', content)
+        self.assertEqual(
+            "WARNING: Could not print routes: missing 'ip' and 'netstat'"
+            " commands\n",
+            self.logs.getvalue())
+        m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/tests/data/netinfo/netdev-formatted-output b/tests/data/netinfo/netdev-formatted-output
new file mode 100644
index 00000000..283ab4a4
--- /dev/null
+++ b/tests/data/netinfo/netdev-formatted-output
@@ -0,0 +1,10 @@
++++++++++++++++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++++++++++++++
++---------+------+------------------------------+---------------+--------+-------------------+
+| Device | Up | Address | Mask | Scope | Hw-Address |
++---------+------+------------------------------+---------------+--------+-------------------+ +| enp0s25 | True | 192.168.2.18 | 255.255.255.0 | . | 50:7b:9d:2c:af:91 | +| enp0s25 | True | fe80::7777:2222:1111:eeee/64 | . | global | 50:7b:9d:2c:af:91 | +| enp0s25 | True | fe80::8107:2b92:867e:f8a6/64 | . | link | 50:7b:9d:2c:af:91 | +| lo | True | 127.0.0.1 | 255.0.0.0 | . | . | +| lo | True | ::1/128 | . | host | . | ++---------+------+------------------------------+---------------+--------+-------------------+ diff --git a/tests/data/netinfo/new-ifconfig-output b/tests/data/netinfo/new-ifconfig-output new file mode 100644 index 00000000..83d4ad16 --- /dev/null +++ b/tests/data/netinfo/new-ifconfig-output @@ -0,0 +1,18 @@ +enp0s25: flags=4163 mtu 1500 + inet 192.168.2.18 netmask 255.255.255.0 broadcast 192.168.2.255 + inet6 fe80::7777:2222:1111:eeee prefixlen 64 scopeid 0x30 + inet6 fe80::8107:2b92:867e:f8a6 prefixlen 64 scopeid 0x20 + ether 50:7b:9d:2c:af:91 txqueuelen 1000 (Ethernet) + RX packets 3017 bytes 10601563 (10.1 MiB) + RX errors 0 dropped 39 overruns 0 frame 0 + TX packets 2627 bytes 196976 (192.3 KiB) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 + +lo: flags=73 mtu 65536 + inet 127.0.0.1 netmask 255.0.0.0 + inet6 ::1 prefixlen 128 scopeid 0x10 + loop txqueuelen 1 (Local Loopback) + RX packets 0 bytes 0 (0.0 B) + RX errors 0 dropped 0 overruns 0 frame 0 + TX packets 0 bytes 0 (0.0 B) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 diff --git a/tests/data/netinfo/old-ifconfig-output b/tests/data/netinfo/old-ifconfig-output new file mode 100644 index 00000000..e01f763e --- /dev/null +++ b/tests/data/netinfo/old-ifconfig-output @@ -0,0 +1,18 @@ +enp0s25 Link encap:Ethernet HWaddr 50:7b:9d:2c:af:91 + inet addr:192.168.2.18 Bcast:192.168.2.255 Mask:255.255.255.0 + inet6 addr: fe80::7777:2222:1111:eeee/64 Scope:Global + inet6 addr: fe80::8107:2b92:867e:f8a6/64 Scope:Link + UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1 + RX packets:8106427 errors:55 dropped:0 overruns:0 frame:37 + TX packets:9339739 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1000 + RX bytes:4953721719 (4.9 GB) TX bytes:7731890194 (7.7 GB) + Interrupt:20 Memory:e1200000-e1220000 + +lo Link encap:Local Loopback + inet addr:127.0.0.1 Mask:255.0.0.0 + inet6 addr: ::1/128 Scope:Host + UP LOOPBACK RUNNING MTU:65536 Metric:1 + RX packets:579230851 errors:0 dropped:0 overruns:0 frame:0 + TX packets:579230851 errors:0 dropped:0 overruns:0 carrier:0 + collisions:0 txqueuelen:1 diff --git a/tests/data/netinfo/route-formatted-output b/tests/data/netinfo/route-formatted-output new file mode 100644 index 00000000..9d2c5dd3 --- /dev/null +++ b/tests/data/netinfo/route-formatted-output @@ -0,0 +1,22 @@ ++++++++++++++++++++++++++++++Route IPv4 info+++++++++++++++++++++++++++++ ++-------+-------------+-------------+---------------+-----------+-------+ +| Route | Destination | Gateway | Genmask | Interface | Flags | ++-------+-------------+-------------+---------------+-----------+-------+ +| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | enp0s25 | UG | +| 1 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | wlp3s0 | UG | +| 2 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | enp0s25 | U | ++-------+-------------+-------------+---------------+-----------+-------+ ++++++++++++++++++++++++++++++++++++Route IPv6 info+++++++++++++++++++++++++++++++++++ ++-------+---------------------------+---------------------------+-----------+-------+ +| Route | Destination | Gateway | Interface | Flags | 
++-------+---------------------------+---------------------------+-----------+-------+ +| 0 | 2a00:abcd:82ae:cd33::657 | :: | enp0s25 | Ue | +| 1 | 2a00:abcd:82ae:cd33::/64 | :: | enp0s25 | U | +| 2 | 2a00:abcd:82ae:cd33::/56 | fe80::32ee:54de:cd43:b4e1 | enp0s25 | UG | +| 3 | fd81:123f:654::657 | :: | enp0s25 | U | +| 4 | fd81:123f:654::/64 | :: | enp0s25 | U | +| 5 | fd81:123f:654::/48 | fe80::32ee:54de:cd43:b4e1 | enp0s25 | UG | +| 6 | fe80::abcd:ef12:bc34:da21 | :: | enp0s25 | U | +| 7 | fe80::/64 | :: | enp0s25 | U | +| 8 | ::/0 | fe80::32ee:54de:cd43:b4e1 | enp0s25 | UG | ++-------+---------------------------+---------------------------+-----------+-------+ diff --git a/tests/data/netinfo/sample-ipaddrshow-output b/tests/data/netinfo/sample-ipaddrshow-output new file mode 100644 index 00000000..b2fa2672 --- /dev/null +++ b/tests/data/netinfo/sample-ipaddrshow-output @@ -0,0 +1,13 @@ +1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo\ valid_lft forever preferred_lft forever + inet6 ::1/128 scope host \ valid_lft forever preferred_lft forever +2: enp0s25: mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000 + link/ether 50:7b:9d:2c:af:91 brd ff:ff:ff:ff:ff:ff + inet 192.168.2.18/24 brd 192.168.2.255 scope global dynamic enp0s25 + valid_lft 84174sec preferred_lft 84174sec + inet6 fe80::7777:2222:1111:eeee/64 scope global + valid_lft forever preferred_lft forever + inet6 fe80::8107:2b92:867e:f8a6/64 scope link + valid_lft forever preferred_lft forever + diff --git a/tests/data/netinfo/sample-iproute-output-v4 b/tests/data/netinfo/sample-iproute-output-v4 new file mode 100644 index 00000000..904cb034 --- /dev/null +++ b/tests/data/netinfo/sample-iproute-output-v4 @@ -0,0 +1,3 @@ +default via 192.168.2.1 dev enp0s25 proto static metric 100 +default via 192.168.2.1 dev wlp3s0 proto static metric 150 +192.168.2.0/24 dev enp0s25 proto kernel scope link src 192.168.2.18 metric 100 diff --git a/tests/data/netinfo/sample-iproute-output-v6 b/tests/data/netinfo/sample-iproute-output-v6 new file mode 100644 index 00000000..12bb1c12 --- /dev/null +++ b/tests/data/netinfo/sample-iproute-output-v6 @@ -0,0 +1,11 @@ +2a00:abcd:82ae:cd33::657 dev enp0s25 proto kernel metric 256 expires 2334sec pref medium +2a00:abcd:82ae:cd33::/64 dev enp0s25 proto ra metric 100 pref medium +2a00:abcd:82ae:cd33::/56 via fe80::32ee:54de:cd43:b4e1 dev enp0s25 proto ra metric 100 pref medium +fd81:123f:654::657 dev enp0s25 proto kernel metric 256 pref medium +fd81:123f:654::/64 dev enp0s25 proto ra metric 100 pref medium +fd81:123f:654::/48 via fe80::32ee:54de:cd43:b4e1 dev enp0s25 proto ra metric 100 pref medium +fe80::abcd:ef12:bc34:da21 dev enp0s25 proto static metric 100 pref medium +fe80::/64 dev enp0s25 proto kernel metric 256 pref medium +default via fe80::32ee:54de:cd43:b4e1 dev enp0s25 proto static metric 100 pref medium +local ::1 dev lo table local proto none metric 0 pref medium +local 2600:1f16:b80:ad00:90a:c915:bca6:5ff2 dev lo table local proto none metric 0 pref medium diff --git a/tests/data/netinfo/sample-route-output-v4 b/tests/data/netinfo/sample-route-output-v4 new file mode 100644 index 00000000..ecc31d96 --- /dev/null +++ b/tests/data/netinfo/sample-route-output-v4 @@ -0,0 +1,5 @@ +Kernel IP routing table +Destination Gateway Genmask Flags Metric Ref Use Iface +0.0.0.0 192.168.2.1 0.0.0.0 UG 100 0 0 enp0s25 +0.0.0.0 192.168.2.1 0.0.0.0 UG 150 0 0 wlp3s0 +192.168.2.0 
0.0.0.0 255.255.255.0 U 100 0 0 enp0s25 diff --git a/tests/data/netinfo/sample-route-output-v6 b/tests/data/netinfo/sample-route-output-v6 new file mode 100644 index 00000000..4712b73c --- /dev/null +++ b/tests/data/netinfo/sample-route-output-v6 @@ -0,0 +1,13 @@ +Kernel IPv6 routing table +Destination Next Hop Flag Met Re Use If +2a00:abcd:82ae:cd33::657/128 :: Ue 256 1 0 enp0s25 +2a00:abcd:82ae:cd33::/64 :: U 100 1 0 enp0s25 +2a00:abcd:82ae:cd33::/56 fe80::32ee:54de:cd43:b4e1 UG 100 1 0 enp0s25 +fd81:123f:654::657/128 :: U 256 1 0 enp0s25 +fd81:123f:654::/64 :: U 100 1 0 enp0s25 +fd81:123f:654::/48 fe80::32ee:54de:cd43:b4e1 UG 100 1 0 enp0s25 +fe80::abcd:ef12:bc34:da21/128 :: U 100 1 2 enp0s25 +fe80::/64 :: U 256 1 16880 enp0s25 +::/0 fe80::32ee:54de:cd43:b4e1 UG 100 1 0 enp0s25 +::/0 :: !n -1 1424956 lo +::1/128 :: Un 0 4 26289 lo diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py index 6364d38e..e1a5d2c8 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/test_filters/test_launch_index.py @@ -55,7 +55,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): return True def testMultiEmailIndex(self): - test_data = self.readResource('filter_cloud_multipart_2.email') + test_data = helpers.readResource('filter_cloud_multipart_2.email') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) self.assertTrue(count_messages(message) > 0) @@ -70,7 +70,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertCounts(message, expected_counts) def testHeaderEmailIndex(self): - test_data = self.readResource('filter_cloud_multipart_header.email') + test_data = helpers.readResource('filter_cloud_multipart_header.email') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) self.assertTrue(count_messages(message) > 0) @@ -85,7 +85,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertCounts(message, expected_counts) def testConfigEmailIndex(self): - test_data = self.readResource('filter_cloud_multipart_1.email') + test_data = helpers.readResource('filter_cloud_multipart_1.email') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) self.assertTrue(count_messages(message) > 0) @@ -99,7 +99,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertCounts(message, expected_counts) def testNoneIndex(self): - test_data = self.readResource('filter_cloud_multipart.yaml') + test_data = helpers.readResource('filter_cloud_multipart.yaml') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) start_count = count_messages(message) @@ -108,7 +108,7 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase): self.assertTrue(self.equivalentMessage(message, filtered_message)) def testIndexes(self): - test_data = self.readResource('filter_cloud_multipart.yaml') + test_data = helpers.readResource('filter_cloud_multipart.yaml') ud_proc = ud.UserDataProcessor(self.getCloudPaths()) message = ud_proc.process(test_data) start_count = count_messages(message) diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index f51358da..3a5072c7 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -100,7 +100,7 @@ def make_dict(max_depth, seed=None): class TestSimpleRun(helpers.ResourceUsingTestCase): def _load_merge_files(self): - merge_root = self.resourceLocation('merge_sources') + merge_root = 
helpers.resourceLocation('merge_sources') tests = [] source_ids = collections.defaultdict(list) expected_files = {} diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index 5d3f1ca3..d1ac4942 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -25,7 +25,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): 'cloud_init_modules': ['write-files'], 'system_info': {'paths': {'run_dir': new_root}} } - ud = self.readResource('user_data.1.txt') + ud = helpers.readResource('user_data.1.txt') cloud_cfg = util.yaml_dumps(cfg) util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) util.write_file(os.path.join(new_root, 'etc', diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 7cbd5538..e04ea031 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -325,7 +325,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): def test_precise_ext4_root(self): - lines = self.readResource('mountinfo_precise_ext4.txt').splitlines() + lines = helpers.readResource('mountinfo_precise_ext4.txt').splitlines() expected = ('/dev/mapper/vg0-root', 'ext4', '/') self.assertEqual(expected, util.parse_mount_info('/', lines)) @@ -347,7 +347,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): self.assertEqual(expected, util.parse_mount_info('/run/lock', lines)) def test_raring_btrfs_root(self): - lines = self.readResource('mountinfo_raring_btrfs.txt').splitlines() + lines = helpers.readResource('mountinfo_raring_btrfs.txt').splitlines() expected = ('/dev/vda1', 'btrfs', '/') self.assertEqual(expected, util.parse_mount_info('/', lines)) @@ -373,7 +373,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): m_os.path.exists.return_value = True # mock subp command from util.get_mount_info_fs_on_zpool zpool_output.return_value = ( - self.readResource('zpool_status_simple.txt'), '' + helpers.readResource('zpool_status_simple.txt'), '' ) # save function return values and do asserts ret = util.get_device_info_from_zpool('vmzroot') @@ -406,7 +406,7 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): m_os.path.exists.return_value = True # mock subp command from util.get_mount_info_fs_on_zpool zpool_output.return_value = ( - self.readResource('zpool_status_simple.txt'), 'error' + helpers.readResource('zpool_status_simple.txt'), 'error' ) # save function return values and do asserts ret = util.get_device_info_from_zpool('vmzroot') @@ -414,7 +414,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): @mock.patch('cloudinit.util.subp') def test_parse_mount_with_ext(self, mount_out): - mount_out.return_value = (self.readResource('mount_parse_ext.txt'), '') + mount_out.return_value = ( + helpers.readResource('mount_parse_ext.txt'), '') # this one is valid and exists in mount_parse_ext.txt ret = util.parse_mount('/var') self.assertEqual(('/dev/mapper/vg00-lv_var', 'ext4', '/var'), ret) @@ -430,7 +431,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): @mock.patch('cloudinit.util.subp') def test_parse_mount_with_zfs(self, mount_out): - mount_out.return_value = (self.readResource('mount_parse_zfs.txt'), '') + mount_out.return_value = ( + helpers.readResource('mount_parse_zfs.txt'), '') # this one is valid and exists in mount_parse_zfs.txt ret = util.parse_mount('/var') self.assertEqual(('vmzroot/ROOT/freebsd/var', 'zfs', '/var'), ret) -- cgit v1.2.3 From 6811926fdb991ad02ad9c0134c1d4bbe82ef87e1 Mon Sep 17 00:00:00 2001 
From: Scott Moser
Date: Wed, 18 Apr 2018 20:37:07 -0600
Subject: Schema: do not warn on duplicate items in commands.

runcmd, bootcmd, snap/commands and ubuntu-advantage/commands would log a
warning (and fail if strict) on duplicate values in the commands, but
duplicates should be allowed. For example, it is perfectly valid to do:

   runcmd: ['sleep 1', 'sleep 1']

LP: #1764264
---
 cloudinit/config/cc_bootcmd.py                     |  1 -
 cloudinit/config/cc_runcmd.py                      |  1 -
 cloudinit/config/cc_snap.py                        |  1 -
 cloudinit/config/cc_ubuntu_advantage.py            |  1 -
 cloudinit/config/tests/test_snap.py                | 36 ++++++++++++++++++++++
 .../unittests/test_handler/test_handler_bootcmd.py | 24 ++++++++++++++-
 .../unittests/test_handler/test_handler_runcmd.py  | 25 +++++++++++++--
 7 files changed, 82 insertions(+), 7 deletions(-)

(limited to 'tests')

diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 233da1ef..db64f0a6 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -63,7 +63,6 @@ schema = {
             'additionalProperties': False,
             'minItems': 1,
             'required': [],
-            'uniqueItems': True
         }
     }
 }
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 539cbd5d..b6f6c807 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -66,7 +66,6 @@ schema = {
             'additionalProperties': False,
             'minItems': 1,
             'required': [],
-            'uniqueItems': True
         }
     }
 }
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 34a53fd4..a7a03214 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -110,7 +110,6 @@ schema = {
             'additionalItems': False,  # Reject non-string & non-list
             'minItems': 1,
             'minProperties': 1,
-            'uniqueItems': True
         },
         'squashfuse_in_container': {
             'type': 'boolean'
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 16b1868b..29d18c96 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -87,7 +87,6 @@ schema = {
             'additionalItems': False,  # Reject non-string & non-list
             'minItems': 1,
             'minProperties': 1,
-            'uniqueItems': True
         }
     },
     'additionalProperties': False,  # Reject keys not in schema
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index c5b4a9de..492d2d46 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -340,6 +340,42 @@ class TestSchema(CiTestCase):
             {'snap': {'assertions': {'01': 'also valid'}}}, schema)
         self.assertEqual('', self.logs.getvalue())
 
+    def test_duplicates_are_fine_array_array(self):
+        """Duplicated commands array/array entries are allowed."""
+        byebye = ["echo", "bye"]
+        try:
+            cfg = {'snap': {'commands': [byebye, byebye]}}
+            validate_cloudconfig_schema(cfg, schema, strict=True)
+        except schema.SchemaValidationError as e:
+            self.fail("command entries may be duplicated.")
+
+    def test_duplicates_are_fine_array_string(self):
+        """Duplicated commands array/string entries are allowed."""
+        byebye = "echo bye"
+        try:
+            cfg = {'snap': {'commands': [byebye, byebye]}}
+            validate_cloudconfig_schema(cfg, schema, strict=True)
+        except schema.SchemaValidationError as e:
+            self.fail("command entries may be duplicated.")
+
+    def test_duplicates_are_fine_dict_array(self):
+        """Duplicated commands dict/array entries are allowed."""
+        byebye = ["echo", "bye"]
+        try:
+            cfg = {'snap': {'commands': {'00': byebye, '01': byebye}}}
+            validate_cloudconfig_schema(cfg, schema, strict=True)
+        except schema.SchemaValidationError as e:
+            self.fail("command entries may be duplicated.")
+
+    def test_duplicates_are_fine_dict_string(self):
+        """Duplicated commands dict/string entries are allowed."""
+        byebye = "echo bye"
+        try:
+            cfg = {'snap': {'commands': {'00': byebye, '01': byebye}}}
+            validate_cloudconfig_schema(cfg, schema, strict=True)
+        except schema.SchemaValidationError as e:
+            self.fail("command entries may be duplicated.")
+
+
 class TestHandle(CiTestCase):
diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py
index 29fc25e4..c3abedde 100644
--- a/tests/unittests/test_handler/test_handler_bootcmd.py
+++ b/tests/unittests/test_handler/test_handler_bootcmd.py
@@ -1,6 +1,6 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
-from cloudinit.config import cc_bootcmd
+from cloudinit.config import cc_bootcmd, schema
 from cloudinit.sources import DataSourceNone
 from cloudinit import (distros, helpers, cloud, util)
 from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
@@ -138,4 +138,26 @@ class TestBootcmd(CiTestCase):
             self.logs.getvalue())
 
 
+class TestSchema(CiTestCase):
+    """Directly test schema rather than through handle."""
+
+    def test_duplicates_are_fine_array_array(self):
+        """Duplicated array entries are allowed."""
+        try:
+            byebye = ["echo", "bye"]
+            schema.validate_cloudconfig_schema(
+                {'bootcmd': [byebye, byebye]}, cc_bootcmd.schema, strict=True)
+        except schema.SchemaValidationError as e:
+            self.fail("bootcmd entries as list are allowed to be duplicates.")
+
+    def test_duplicates_are_fine_array_string(self):
+        """Duplicated array entries are allowed in values of array."""
+        try:
+            byebye = "echo bye"
+            schema.validate_cloudconfig_schema(
+                {'bootcmd': [byebye, byebye]}, cc_bootcmd.schema, strict=True)
+        except schema.SchemaValidationError as e:
+            self.fail("bootcmd entries are allowed to be duplicates.")
+
+
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py
index dbbb2717..ee1981d1 100644
--- a/tests/unittests/test_handler/test_handler_runcmd.py
+++ b/tests/unittests/test_handler/test_handler_runcmd.py
@@ -1,10 +1,10 @@
 # This file is part of cloud-init. See LICENSE file for license information.
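
The warnings this commit removes were produced by jsonschema's 'uniqueItems'
validator, which the four schemas above had set on their command arrays. The
effect is easy to reproduce in isolation; a minimal sketch, assuming only that
the jsonschema package is available (validate() and ValidationError are the
real jsonschema API, but the schema dict below is illustrative, not one of the
cloud-init schemas):

    import jsonschema

    cmds = ['sleep 1', 'sleep 1']
    array_schema = {'type': 'array', 'items': {'type': 'string'}}
    jsonschema.validate(cmds, array_schema)    # duplicates validate fine

    array_schema['uniqueItems'] = True         # what the schemas used to set
    try:
        jsonschema.validate(cmds, array_schema)
    except jsonschema.exceptions.ValidationError as e:
        # message reads like: ['sleep 1', 'sleep 1'] has non-unique elements
        print(e.message)

Dropping 'uniqueItems' is therefore all that is needed for repeated commands
to pass strict validation, which is what the TestSchema additions assert.
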
-from cloudinit.config import cc_runcmd +from cloudinit.config import cc_runcmd, schema from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) from cloudinit.tests.helpers import ( - FilesystemMockingTestCase, skipUnlessJsonSchema) + CiTestCase, FilesystemMockingTestCase, skipUnlessJsonSchema) import logging import os @@ -99,4 +99,25 @@ class TestRuncmd(FilesystemMockingTestCase): self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode)) +class TestSchema(CiTestCase): + """Directly test schema rather than through handle.""" + + def test_duplicates_are_fine_array(self): + """Duplicated array entries are allowed.""" + try: + byebye = ["echo", "bye"] + schema.validate_cloudconfig_schema( + {'runcmd': [byebye, byebye]}, cc_runcmd.schema, strict=True) + except schema.SchemaValidationError as e: + self.fail("runcmd entries as list are allowed to be duplicates.") + + def test_duplicates_are_fine_string(self): + """Duplicated string entries are allowed.""" + try: + byebye = "echo bye" + schema.validate_cloudconfig_schema( + {'runcmd': [byebye, byebye]}, cc_runcmd.schema, strict=True) + except schema.SchemaValidationError as e: + self.fail("runcmd entries are allowed to be duplicates.") + # vi: ts=4 expandtab -- cgit v1.2.3 From 1081962eacf2814fea6f4fa3255c530de14e4a24 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 19 Apr 2018 21:30:08 -0600 Subject: pylint: pay attention to unused variable warnings. This enables warnings produced by pylint for unused variables (W0612), and fixes the existing errors. --- .pylintrc | 2 +- cloudinit/analyze/dump.py | 2 +- cloudinit/cmd/tests/test_main.py | 6 ++-- cloudinit/config/cc_apt_configure.py | 2 +- cloudinit/config/cc_emit_upstart.py | 2 +- cloudinit/config/cc_resizefs.py | 8 ++---- cloudinit/config/cc_rh_subscription.py | 18 ++++++------ cloudinit/config/cc_snap.py | 4 +-- cloudinit/config/cc_snappy.py | 4 +-- cloudinit/config/cc_ubuntu_advantage.py | 4 +-- cloudinit/config/schema.py | 4 +-- cloudinit/distros/freebsd.py | 2 +- cloudinit/distros/ubuntu.py | 2 +- cloudinit/net/__init__.py | 2 +- cloudinit/net/cmdline.py | 2 +- cloudinit/net/dhcp.py | 2 +- cloudinit/net/sysconfig.py | 2 +- cloudinit/reporting/events.py | 2 +- cloudinit/sources/DataSourceAliYun.py | 2 +- cloudinit/sources/DataSourceAzure.py | 33 +++++++++------------- cloudinit/sources/DataSourceMAAS.py | 2 +- cloudinit/sources/DataSourceOVF.py | 2 +- cloudinit/sources/DataSourceOpenStack.py | 4 +-- cloudinit/sources/helpers/digitalocean.py | 7 ++--- cloudinit/sources/helpers/openstack.py | 2 +- cloudinit/sources/helpers/vmware/imc/config_nic.py | 2 +- .../sources/helpers/vmware/imc/config_passwd.py | 4 +-- .../sources/helpers/vmware/imc/guestcust_util.py | 4 +-- cloudinit/sources/tests/test_init.py | 2 +- cloudinit/templater.py | 2 +- cloudinit/tests/helpers.py | 2 +- cloudinit/tests/test_util.py | 2 +- cloudinit/url_helper.py | 2 +- cloudinit/util.py | 2 +- tests/cloud_tests/bddeb.py | 2 +- tests/cloud_tests/collect.py | 3 +- tests/cloud_tests/platforms/instances.py | 2 +- tests/cloud_tests/platforms/lxd/instance.py | 10 +++---- tests/cloud_tests/setup_image.py | 11 ++++---- tests/cloud_tests/testcases/base.py | 2 +- .../testcases/examples/including_user_groups.py | 2 +- tests/cloud_tests/testcases/modules/user_groups.py | 2 +- tests/cloud_tests/util.py | 2 +- tests/unittests/test__init__.py | 2 +- tests/unittests/test_datasource/test_azure.py | 4 +-- tests/unittests/test_datasource/test_maas.py | 4 +-- 
tests/unittests/test_datasource/test_nocloud.py | 3 -- .../test_handler/test_handler_apt_source_v3.py | 2 +- tests/unittests/test_handler/test_handler_ntp.py | 2 +- tests/unittests/test_templating.py | 4 +-- tests/unittests/test_util.py | 6 ++-- 51 files changed, 95 insertions(+), 112 deletions(-) (limited to 'tests') diff --git a/.pylintrc b/.pylintrc index 0bdfa59d..3bfa0c81 100644 --- a/.pylintrc +++ b/.pylintrc @@ -28,7 +28,7 @@ jobs=4 # W0703(broad-except) # W1401(anomalous-backslash-in-string) -disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401 +disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401 [REPORTS] diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index b071aa19..1f3060d0 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -112,7 +112,7 @@ def parse_ci_logline(line): return None event_description = stage_to_description[event_name] else: - (pymodloglvl, event_type, event_name) = eventstr.split()[0:3] + (_pymodloglvl, event_type, event_name) = eventstr.split()[0:3] event_description = eventstr.split(event_name)[1].strip() event = { diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index dbe421c0..e2c54ae8 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -56,7 +56,7 @@ class TestMain(FilesystemMockingTestCase): cmdargs = myargs( debug=False, files=None, force=False, local=False, reporter=None, subcommand='init') - (item1, item2) = wrap_and_call( + (_item1, item2) = wrap_and_call( 'cloudinit.cmd.main', {'util.close_stdin': True, 'netinfo.debug_info': 'my net debug info', @@ -85,7 +85,7 @@ class TestMain(FilesystemMockingTestCase): cmdargs = myargs( debug=False, files=None, force=False, local=False, reporter=None, subcommand='init') - (item1, item2) = wrap_and_call( + (_item1, item2) = wrap_and_call( 'cloudinit.cmd.main', {'util.close_stdin': True, 'netinfo.debug_info': 'my net debug info', @@ -133,7 +133,7 @@ class TestMain(FilesystemMockingTestCase): self.assertEqual(main.LOG, log) self.assertIsNone(args) - (item1, item2) = wrap_and_call( + (_item1, item2) = wrap_and_call( 'cloudinit.cmd.main', {'util.close_stdin': True, 'netinfo.debug_info': 'my net debug info', diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index afaca464..e18944ec 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -378,7 +378,7 @@ def apply_debconf_selections(cfg, target=None): # get a complete list of packages listed in input pkgs_cfgd = set() - for key, content in selsets.items(): + for _key, content in selsets.items(): for line in content.splitlines(): if line.startswith("#"): continue diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 69dc2d5e..eb9fbe66 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -43,7 +43,7 @@ def is_upstart_system(): del myenv['UPSTART_SESSION'] check_cmd = ['initctl', 'version'] try: - (out, err) = util.subp(check_cmd, env=myenv) + (out, _err) = util.subp(check_cmd, env=myenv) return 'upstart' in out except util.ProcessExecutionError as e: LOG.debug("'%s' returned '%s', not using upstart", diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 013e69b5..82f29e10 100644 
--- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -89,13 +89,11 @@ def _resize_zfs(mount_point, devpth): def _get_dumpfs_output(mount_point): - dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) - return dumpfs_res + return util.subp(['dumpfs', '-m', mount_point])[0] def _get_gpart_output(part): - gpart_res, err = util.subp(['gpart', 'show', part]) - return gpart_res + return util.subp(['gpart', 'show', part])[0] def _can_skip_resize_ufs(mount_point, devpth): @@ -113,7 +111,7 @@ def _can_skip_resize_ufs(mount_point, devpth): if not line.startswith('#'): newfs_cmd = shlex.split(line) opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' - optlist, args = getopt.getopt(newfs_cmd[1:], opt_value) + optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value) for o, a in optlist: if o == "-s": cur_fs_sz = int(a) diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 530808ce..1c679430 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -209,8 +209,7 @@ class SubscriptionManager(object): cmd.append("--serverurl={0}".format(self.server_hostname)) try: - return_out, return_err = self._sub_man_cli(cmd, - logstring_val=True) + return_out = self._sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -233,8 +232,7 @@ class SubscriptionManager(object): # Attempting to register the system only try: - return_out, return_err = self._sub_man_cli(cmd, - logstring_val=True) + return_out = self._sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -257,7 +255,7 @@ class SubscriptionManager(object): .format(self.servicelevel)] try: - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: if e.stdout.rstrip() != '': for line in e.stdout.split("\n"): @@ -275,7 +273,7 @@ class SubscriptionManager(object): def _set_auto_attach(self): cmd = ['attach', '--auto'] try: - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: self.log_warn("Auto-attach failed with: {0}".format(e)) return False @@ -294,12 +292,12 @@ class SubscriptionManager(object): # Get all available pools cmd = ['list', '--available', '--pool-only'] - results, errors = self._sub_man_cli(cmd) + results = self._sub_man_cli(cmd)[0] available = (results.rstrip()).split("\n") # Get all consumed pools cmd = ['list', '--consumed', '--pool-only'] - results, errors = self._sub_man_cli(cmd) + results = self._sub_man_cli(cmd)[0] consumed = (results.rstrip()).split("\n") return available, consumed @@ -311,14 +309,14 @@ class SubscriptionManager(object): ''' cmd = ['repos', '--list-enabled'] - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] active_repos = [] for repo in return_out.split("\n"): if "Repo ID:" in repo: active_repos.append((repo.split(':')[1]).strip()) cmd = ['repos', '--list-disabled'] - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] inactive_repos = [] for repo in return_out.split("\n"): diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index a7a03214..90724b81 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -203,12 +203,12 @@ def maybe_install_squashfuse(cloud): return try: 
cloud.distro.update_package_sources() - except Exception as e: + except Exception: util.logexc(LOG, "Package update failed") raise try: cloud.distro.install_packages(['squashfuse']) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to install squashfuse") raise diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index bab80bbe..15bee2d3 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -213,7 +213,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None): def read_installed_packages(): ret = [] - for (name, date, version, dev) in read_pkg_data(): + for (name, _date, _version, dev) in read_pkg_data(): if dev: ret.append(NAMESPACE_DELIM.join([name, dev])) else: @@ -222,7 +222,7 @@ def read_installed_packages(): def read_pkg_data(): - out, err = util.subp([SNAPPY_CMD, "list"]) + out, _err = util.subp([SNAPPY_CMD, "list"]) pkg_data = [] for line in out.splitlines()[1:]: toks = line.split(sep=None, maxsplit=3) diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index 29d18c96..5e082bd6 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -148,12 +148,12 @@ def maybe_install_ua_tools(cloud): return try: cloud.distro.update_package_sources() - except Exception as e: + except Exception: util.logexc(LOG, "Package update failed") raise try: cloud.distro.install_packages(['ubuntu-advantage-tools']) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to install ubuntu-advantage-tools") raise diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index ca7d0d5b..76826e05 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -297,8 +297,8 @@ def get_schema(): configs_dir = os.path.dirname(os.path.abspath(__file__)) potential_handlers = find_modules(configs_dir) - for (fname, mod_name) in potential_handlers.items(): - mod_locs, looked_locs = importer.find_module( + for (_fname, mod_name) in potential_handlers.items(): + mod_locs, _looked_locs = importer.find_module( mod_name, ['cloudinit.config'], ['schema']) if mod_locs: mod = importer.import_module(mod_locs[0]) diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 099fac5c..5b1718a4 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -113,7 +113,7 @@ class Distro(distros.Distro): n = re.search(r'\d+$', dev) index = n.group(0) - (out, err) = util.subp(['ifconfig', '-a']) + (out, _err) = util.subp(['ifconfig', '-a']) ifconfigoutput = [x for x in (out.strip()).splitlines() if len(x.split()) > 0] bsddev = 'NOT_FOUND' diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index fdc1f622..68154104 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -25,7 +25,7 @@ class Distro(debian.Distro): def preferred_ntp_clients(self): """The preferred ntp client is dependent on the version.""" if not self._preferred_ntp_clients: - (name, version, codename) = util.system_info()['dist'] + (_name, _version, codename) = util.system_info()['dist'] # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd. 
if codename == "xenial" and not util.system_is_snappy(): self._preferred_ntp_clients = ['ntp'] diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index f69c0ef2..80054546 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -295,7 +295,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): def _version_2(netcfg): renames = [] - for key, ent in netcfg.get('ethernets', {}).items(): + for ent in netcfg.get('ethernets', {}).values(): # only rename if configured to do so name = ent.get('set-name') if not name: diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 9e9fe0fe..f89a0f73 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -65,7 +65,7 @@ def _klibc_to_config_entry(content, mac_addrs=None): iface['mac_address'] = mac_addrs[name] # Handle both IPv4 and IPv6 values - for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')): + for pre in ('IPV4', 'IPV6'): # if no IPV4ADDR or IPV6ADDR, then go on. if pre + "ADDR" not in data: continue diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 087c0c03..12cf5097 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -216,7 +216,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None): if leases_d is None: leases_d = NETWORKD_LEASES_DIR leases = networkd_load_leases(leases_d=leases_d) - for ifindex, data in sorted(leases.items()): + for _ifindex, data in sorted(leases.items()): if data.get(keyname): return data[keyname] return None diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 39d89c46..7a7f5093 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -364,7 +364,7 @@ class Renderer(renderer.Renderer): @classmethod def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): - for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): + for _, subnet in enumerate(subnets, start=len(iface_cfg.children)): for route in subnet.get('routes', []): is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway']) diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py index 4f62d2f9..e5dfab33 100644 --- a/cloudinit/reporting/events.py +++ b/cloudinit/reporting/events.py @@ -192,7 +192,7 @@ class ReportEventStack(object): def _childrens_finish_info(self): for cand_result in (status.FAIL, status.WARN): - for name, (value, msg) in self.children.items(): + for _name, (value, _msg) in self.children.items(): if value == cand_result: return (value, self.message) return (self.result, self.message) diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 22279d09..858e0827 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -45,7 +45,7 @@ def _is_aliyun(): def parse_public_keys(public_keys): keys = [] - for key_id, key_body in public_keys.items(): + for _key_id, key_body in public_keys.items(): if isinstance(key_body, str): keys.append(key_body.strip()) elif isinstance(key_body, list): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 0ee622e2..a71197a6 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -107,31 +107,24 @@ def find_dev_from_busdev(camcontrol_out, busdev): return None -def get_dev_storvsc_sysctl(): +def execute_or_debug(cmd, fail_ret=None): try: - sysctl_out, err = util.subp(['sysctl', 'dev.storvsc']) + return util.subp(cmd)[0] except util.ProcessExecutionError: - 
LOG.debug("Fail to execute sysctl dev.storvsc") - sysctl_out = "" - return sysctl_out + LOG.debug("Failed to execute: %s", ' '.join(cmd)) + return fail_ret + + +def get_dev_storvsc_sysctl(): + return execute_or_debug(["sysctl", "dev.storvsc"], fail_ret="") def get_camcontrol_dev_bus(): - try: - camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b']) - except util.ProcessExecutionError: - LOG.debug("Fail to execute camcontrol devlist -b") - return None - return camcontrol_b_out + return execute_or_debug(['camcontrol', 'devlist', '-b']) def get_camcontrol_dev(): - try: - camcontrol_out, err = util.subp(['camcontrol', 'devlist']) - except util.ProcessExecutionError: - LOG.debug("Fail to execute camcontrol devlist") - return None - return camcontrol_out + return execute_or_debug(['camcontrol', 'devlist']) def get_resource_disk_on_freebsd(port_id): @@ -474,7 +467,7 @@ class DataSourceAzure(sources.DataSource): before we go into our polling loop.""" try: get_metadata_from_fabric(None, lease['unknown-245']) - except Exception as exc: + except Exception: LOG.warning( "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) @@ -492,7 +485,7 @@ class DataSourceAzure(sources.DataSource): jump back into the polling loop in order to retrieve the ovf_env.""" if not ret: return False - (md, self.userdata_raw, cfg, files) = ret + (_md, self.userdata_raw, cfg, _files) = ret path = REPROVISION_MARKER_FILE if (cfg.get('PreprovisionedVm') is True or os.path.isfile(path)): @@ -528,7 +521,7 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['agent_command']) try: fabric_data = metadata_func() - except Exception as exc: + except Exception: LOG.warning( "Error communicating with Azure fabric; You may experience." 
"connectivity issues.", exc_info=True) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 6ac88635..aa56addb 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -204,7 +204,7 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, seed_url = seed_url[:-1] md = {} - for path, dictname, binary, optional in DS_FIELDS: + for path, _dictname, binary, optional in DS_FIELDS: if version is None: url = "%s/%s" % (seed_url, path) else: diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index dc914a72..178ccb0f 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -556,7 +556,7 @@ def search_file(dirpath, filename): if not dirpath or not filename: return None - for root, dirs, files in os.walk(dirpath): + for root, _dirs, files in os.walk(dirpath): if filename in files: return os.path.join(root, filename) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index e55a7638..fb166ae1 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -86,7 +86,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): md_urls.append(md_url) url2base[md_url] = url - (max_wait, timeout, retries) = self._get_url_settings() + (max_wait, timeout, _retries) = self._get_url_settings() start_time = time.time() avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, timeout=timeout) @@ -106,7 +106,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): except IOError: return False - (max_wait, timeout, retries) = self._get_url_settings() + (_max_wait, timeout, retries) = self._get_url_settings() try: results = util.log_time(LOG.debug, diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index 693f8d5c..0e7cccac 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -41,10 +41,9 @@ def assign_ipv4_link_local(nic=None): "address") try: - (result, _err) = util.subp(ip_addr_cmd) + util.subp(ip_addr_cmd) LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic) - - (result, _err) = util.subp(ip_link_cmd) + util.subp(ip_link_cmd) LOG.debug("brought device '%s' up", nic) except Exception: util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed." 
@@ -75,7 +74,7 @@ def del_ipv4_link_local(nic=None): ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic] try: - (result, _err) = util.subp(ip_addr_cmd) + util.subp(ip_addr_cmd) LOG.debug("removed ip4LL addresses from %s", nic) except Exception as e: diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 26f3168d..a4cf0667 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -638,7 +638,7 @@ def convert_net_json(network_json=None, known_macs=None): known_macs = net.get_interfaces_by_mac() # go through and fill out the link_id_info with names - for link_id, info in link_id_info.items(): + for _link_id, info in link_id_info.items(): if info.get('name'): continue if info.get('mac') in known_macs: diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 2d8900e2..3ef8c624 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -73,7 +73,7 @@ class NicConfigurator(object): The mac address(es) are in the lower case """ cmd = ['ip', 'addr', 'show'] - (output, err) = util.subp(cmd) + output, _err = util.subp(cmd) sections = re.split(r'\n\d+: ', '\n' + output)[1:] macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py index 75cfbaaf..8c91fa41 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py +++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py @@ -56,10 +56,10 @@ class PasswordConfigurator(object): LOG.info('Expiring password.') for user in uidUserList: try: - out, err = util.subp(['passwd', '--expire', user]) + util.subp(['passwd', '--expire', user]) except util.ProcessExecutionError as e: if os.path.exists('/usr/bin/chage'): - out, e = util.subp(['chage', '-d', '0', user]) + util.subp(['chage', '-d', '0', user]) else: LOG.warning('Failed to expire password for %s with error: ' '%s', user, e) diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index 44075255..a590f323 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -91,7 +91,7 @@ def enable_nics(nics): for attempt in range(0, enableNicsWaitRetries): logger.debug("Trying to connect interfaces, attempt %d", attempt) - (out, err) = set_customization_status( + (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS, nics) @@ -104,7 +104,7 @@ def enable_nics(nics): return for count in range(0, enableNicsWaitCount): - (out, err) = set_customization_status( + (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS, nics) diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index e7fda22a..452e9219 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -278,7 +278,7 @@ class TestDataSource(CiTestCase): base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505 # Import all DataSource subclasses so we can inspect them. 
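         # (only the module names from find_modules are used below; the
         # file-location half of each item is deliberately ignored)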
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) - for loc, name in modules.items(): + for _loc, name in modules.items(): mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) if mod_locs: importer.import_module(mod_locs[0]) diff --git a/cloudinit/templater.py b/cloudinit/templater.py index 9a087e1c..7e7acb86 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -147,7 +147,7 @@ def render_string(content, params): Warning: py2 str with non-ascii chars will cause UnicodeDecodeError.""" if not params: params = {} - template_type, renderer, content = detect_template(content) + _template_type, renderer, content = detect_template(content) return renderer(content, params) # vi: ts=4 expandtab diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 82fd347b..5aada6e7 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -334,7 +334,7 @@ def dir2dict(startdir, prefix=None): flist = {} if prefix is None: prefix = startdir - for root, dirs, files in os.walk(startdir): + for root, _dirs, files in os.walk(startdir): for fname in files: fpath = os.path.join(root, fname) key = fpath[len(prefix):] diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index 3f37dbb6..76eed076 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -135,7 +135,7 @@ class TestGetHostnameFqdn(CiTestCase): def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): """Calls to cloud.get_hostname pass the metadata_only parameter.""" mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') - hostname, fqdn = util.get_hostname_fqdn( + _hn, _fqdn = util.get_hostname_fqdn( cfg={}, cloud=mycloud, metadata_only=True) self.assertEqual( [{'fqdn': True, 'metadata_only': True}, diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 03a573af..1de07b1c 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -519,7 +519,7 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, resource_owner_secret=token_secret, signature_method=oauth1.SIGNATURE_PLAINTEXT, timestamp=timestamp) - uri, signed_headers, body = client.sign(url) + _uri, signed_headers, _body = client.sign(url) return signed_headers # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 1717b529..310758dd 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2214,7 +2214,7 @@ def parse_mtab(path): def find_freebsd_part(label_part): if label_part.startswith("/dev/label/"): target_label = label_part[5:] - (label_part, err) = subp(['glabel', 'status', '-s']) + (label_part, _err) = subp(['glabel', 'status', '-s']) for labels in label_part.split("\n"): items = labels.split() if len(items) > 0 and items[0].startswith(target_label): diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py index b9cfcfa6..f04d0cd4 100644 --- a/tests/cloud_tests/bddeb.py +++ b/tests/cloud_tests/bddeb.py @@ -113,7 +113,7 @@ def bddeb(args): @return_value: fail count """ LOG.info('preparing to build cloud-init deb') - (res, failed) = run_stage('build deb', [partial(setup_build, args)]) + _res, failed = run_stage('build deb', [partial(setup_build, args)]) return failed # vi: ts=4 expandtab diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index d4f9135b..1ba72856 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -25,7 +25,8 @@ def collect_script(instance, base_dir, script, script_name): script.encode(), rcs=False, 
description='collect: {}'.format(script_name)) if err: - LOG.debug("collect script %s had stderr: %s", script_name, err) + LOG.debug("collect script %s exited '%s' and had stderr: %s", + script_name, exit, err) if not isinstance(out, bytes): raise util.PlatformError( "Collection of '%s' returned type %s, expected bytes: %s" % diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py index 3bad021f..cc439d29 100644 --- a/tests/cloud_tests/platforms/instances.py +++ b/tests/cloud_tests/platforms/instances.py @@ -108,7 +108,7 @@ class Instance(TargetBase): return client except (ConnectionRefusedError, AuthenticationException, BadHostKeyException, ConnectionResetError, SSHException, - OSError) as e: + OSError): retries -= 1 time.sleep(10) diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index 0d957bca..1c17c781 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -152,9 +152,8 @@ class LXDInstance(Instance): return fp.read() try: - stdout, stderr = subp( - ['lxc', 'console', '--show-log', self.name], decode=False) - return stdout + return subp(['lxc', 'console', '--show-log', self.name], + decode=False)[0] except ProcessExecutionError as e: raise PlatformError( "console log", @@ -214,11 +213,10 @@ def _has_proper_console_support(): reason = "LXD Driver version not 3.x+ (%s)" % dver else: try: - stdout, stderr = subp(['lxc', 'console', '--help'], - decode=False) + stdout = subp(['lxc', 'console', '--help'], decode=False)[0] if not (b'console' in stdout and b'log' in stdout): reason = "no '--log' in lxc console --help" - except ProcessExecutionError as e: + except ProcessExecutionError: reason = "no 'console' command in lxc client" if reason: diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py index 6d242115..4e195709 100644 --- a/tests/cloud_tests/setup_image.py +++ b/tests/cloud_tests/setup_image.py @@ -25,10 +25,9 @@ def installed_package_version(image, package, ensure_installed=True): else: raise NotImplementedError - msg = 'query version for package: {}'.format(package) - (out, err, exit) = image.execute( - cmd, description=msg, rcs=(0,) if ensure_installed else range(0, 256)) - return out.strip() + return image.execute( + cmd, description='query version for package: {}'.format(package), + rcs=(0,) if ensure_installed else range(0, 256))[0].strip() def install_deb(args, image): @@ -54,7 +53,7 @@ def install_deb(args, image): remote_path], description=msg) # check installed deb version matches package fmt = ['-W', "--showformat=${Version}"] - (out, err, exit) = image.execute(['dpkg-deb'] + fmt + [remote_path]) + out = image.execute(['dpkg-deb'] + fmt + [remote_path])[0] expected_version = out.strip() found_version = installed_package_version(image, 'cloud-init') if expected_version != found_version: @@ -85,7 +84,7 @@ def install_rpm(args, image): image.execute(['rpm', '-U', remote_path], description=msg) fmt = ['--queryformat', '"%{VERSION}"'] - (out, err, exit) = image.execute(['rpm', '-q'] + fmt + [remote_path]) + (out, _err, _exit) = image.execute(['rpm', '-q'] + fmt + [remote_path]) expected_version = out.strip() found_version = installed_package_version(image, 'cloud-init') if expected_version != found_version: diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 4fda8f91..0d1916b4 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py
@@ -159,7 +159,7 @@ class CloudTestCase(unittest.TestCase): expected_net_keys = [ 'public-ipv4s', 'ipv4-associations', 'local-hostname', 'public-hostname'] - for mac, mac_data in macs.items(): + for mac_data in macs.values(): for key in expected_net_keys: self.assertIn(key, mac_data) self.assertIsNotNone( diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.py b/tests/cloud_tests/testcases/examples/including_user_groups.py index 93b7a82d..4067348d 100644 --- a/tests/cloud_tests/testcases/examples/including_user_groups.py +++ b/tests/cloud_tests/testcases/examples/including_user_groups.py @@ -42,7 +42,7 @@ class TestUserGroups(base.CloudTestCase): def test_user_root_in_secret(self): """Test root user is in 'secret' group.""" - user, _, groups = self.get_data_file('root_groups').partition(":") + _user, _, groups = self.get_data_file('root_groups').partition(":") self.assertIn("secret", groups.split(), msg="User root is not in group 'secret'") diff --git a/tests/cloud_tests/testcases/modules/user_groups.py b/tests/cloud_tests/testcases/modules/user_groups.py index 93b7a82d..4067348d 100644 --- a/tests/cloud_tests/testcases/modules/user_groups.py +++ b/tests/cloud_tests/testcases/modules/user_groups.py @@ -42,7 +42,7 @@ class TestUserGroups(base.CloudTestCase): def test_user_root_in_secret(self): """Test root user is in 'secret' group.""" - user, _, groups = self.get_data_file('root_groups').partition(":") + _user, _, groups = self.get_data_file('root_groups').partition(":") self.assertIn("secret", groups.split(), msg="User root is not in group 'secret'") diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py index 3dd4996d..06f7d865 100644 --- a/tests/cloud_tests/util.py +++ b/tests/cloud_tests/util.py @@ -358,7 +358,7 @@ class TargetBase(object): # when sh is invoked with '-c', then the first argument is "$0" # which is commonly understood as the "program name". 
# 'read_data' is the program name, and 'remote_path' is '$1' - stdout, stderr, rc = self._execute( + stdout, _stderr, rc = self._execute( ["sh", "-c", 'exec cat "$1"', 'read_data', remote_path]) if rc != 0: raise RuntimeError("Failed to read file '%s'" % remote_path) diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 25878d7a..f1ab02e9 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -214,7 +214,7 @@ class TestCmdlineUrl(CiTestCase): def test_no_key_found(self, m_read): cmdline = "ro mykey=http://example.com/foo root=foo" fpath = self.tmp_path("ccpath") - lvl, msg = main.attempt_cmdline_url( + lvl, _msg = main.attempt_cmdline_url( fpath, network=True, cmdline=cmdline) m_read.assert_not_called() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3e8b7913..88fe76c7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -214,7 +214,7 @@ scbus-1 on xpt0 bus 0 self.assertIn(tag, x) def tags_equal(x, y): - for x_tag, x_val in x.items(): + for x_val in x.values(): y_val = y.get(x_val.tag) self.assertEqual(x_val.text, y_val.text) @@ -1216,7 +1216,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): fake_resp.return_value = mock.MagicMock(status_code=200, text=content, content=content) dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - md, ud, cfg, d = dsa._reprovision() + md, _ud, cfg, _d = dsa._reprovision() self.assertEqual(md['local-hostname'], hostname) self.assertEqual(cfg['system_info']['default_user']['name'], username) self.assertEqual(fake_resp.call_args_list, diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 6e4031cf..c84d067e 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -53,7 +53,7 @@ class TestMAASDataSource(CiTestCase): my_d = os.path.join(self.tmp, "valid_extra") populate_dir(my_d, data) - ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d) + ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d) self.assertEqual(userdata, ud) for key in ('instance-id', 'local-hostname'): @@ -149,7 +149,7 @@ class TestMAASDataSource(CiTestCase): 'meta-data/local-hostname': 'test-hostname', 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(), } - ud, md, vd = self.mock_read_maas_seed_url( + _ud, md, vd = self.mock_read_maas_seed_url( valid, "http://example.com/foo") self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 70d50de4..cdbd1e1a 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -51,9 +51,6 @@ class TestNoCloudDataSource(CiTestCase): class PsuedoException(Exception): pass - def my_find_devs_with(*args, **kwargs): - raise PsuedoException - self.mocks.enter_context( mock.patch.object(util, 'find_devs_with', side_effect=PsuedoException)) diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index 7bb1b7c4..e486862d 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -528,7 +528,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): expected = sorted([npre + suff for opre, npre, suff 
in files]) # create files - for (opre, npre, suff) in files: + for (opre, _npre, suff) in files: fpath = os.path.join(apt_lists_d, opre + suff) util.write_file(fpath, content=fpath) diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 02676aa6..17c53559 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -76,7 +76,7 @@ class TestNtp(FilesystemMockingTestCase): template = TIMESYNCD_TEMPLATE else: template = NTP_TEMPLATE - (confpath, template_fn) = self._generate_template(template=template) + (confpath, _template_fn) = self._generate_template(template=template) ntpconfig = copy.deepcopy(dcfg[client]) ntpconfig['confpath'] = confpath ntpconfig['template_name'] = os.path.basename(confpath) diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index 1080e135..20c87efa 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -50,12 +50,12 @@ class TestTemplates(test_helpers.CiTestCase): def test_detection(self): blob = "## template:cheetah" - (template_type, renderer, contents) = templater.detect_template(blob) + (template_type, _renderer, contents) = templater.detect_template(blob) self.assertIn("cheetah", template_type) self.assertEqual("", contents.strip()) blob = "blahblah $blah" - (template_type, renderer, contents) = templater.detect_template(blob) + (template_type, _renderer, _contents) = templater.detect_template(blob) self.assertIn("cheetah", template_type) self.assertEqual(blob, contents) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index e04ea031..84941c7d 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -774,11 +774,11 @@ class TestSubp(helpers.CiTestCase): def test_subp_reads_env(self): with mock.patch.dict("os.environ", values={'FOO': 'BAR'}): - out, err = util.subp(self.printenv + ['FOO'], capture=True) + out, _err = util.subp(self.printenv + ['FOO'], capture=True) self.assertEqual('FOO=BAR', out.splitlines()[0]) def test_subp_env_and_update_env(self): - out, err = util.subp( + out, _err = util.subp( self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, env={'FOO': 'BAR'}, update_env={'HOME': '/myhome', 'K2': 'V2'}) @@ -788,7 +788,7 @@ class TestSubp(helpers.CiTestCase): def test_subp_update_env(self): extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'} with mock.patch.dict("os.environ", values=extra): - out, err = util.subp( + out, _err = util.subp( self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, update_env={'HOME': '/myhome', 'K2': 'V2'}) -- cgit v1.2.3 From 4952a8545b61ceb2fe26a933d2f64020d0281703 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 20 Apr 2018 12:22:23 -0600 Subject: set_passwords: Add newline to end of sshd config, only restart if updated. This admittedly does a fairly extensive re-factor to simply add a newline to the end of sshd_config. It makes the ssh_config updating portion of set_passwords more testable and adds tests for that. The new function is in 'update_ssh_config_lines' which allows you to update a config with multiple changes even though only a single one is currently used. We also only restart the ssh daemon now if a change was made to the config file. Before it was always restarted if the user specified a value for ssh_pwauth other than 'unchanged'. Thanks to Lorens Kockum for initial diagnosis and patch. 
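A minimal sketch of the resulting call site, using only names that appear in the diff below (illustrative, not part of the patch):

    # handle() now delegates all of the old inline sshd_config editing:
    handle_ssh_pwauth(
        cfg.get('ssh_pwauth'),  # True, False, 'unchanged' or None
        service_cmd=cloud.distro.init_cmd,
        service_name=cloud.distro.get_option('ssh_svcname', 'ssh'))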
LP: #1677205 --- cloudinit/config/cc_set_passwords.py | 105 ++++++++++++--------------- cloudinit/config/tests/test_set_passwords.py | 71 ++++++++++++++++++ cloudinit/ssh_util.py | 70 ++++++++++++++++-- tests/unittests/test_sshutil.py | 97 ++++++++++++++++++++++++- 4 files changed, 273 insertions(+), 70 deletions(-) create mode 100644 cloudinit/config/tests/test_set_passwords.py (limited to 'tests') diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index bb24d57f..5ef97376 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -68,16 +68,57 @@ import re import sys from cloudinit.distros import ug_util -from cloudinit import ssh_util +from cloudinit import log as logging +from cloudinit.ssh_util import update_ssh_config from cloudinit import util from string import ascii_letters, digits +LOG = logging.getLogger(__name__) + # We are removing certain 'painful' letters/numbers PW_SET = (''.join([x for x in ascii_letters + digits if x not in 'loLOI01'])) +def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): + """Apply sshd PasswordAuthentication changes. + + @param pw_auth: config setting from 'pw_auth'. + Best given as True, False, or "unchanged". + @param service_cmd: The service command list (['service']) + @param service_name: The name of the sshd service for the system. + + @return: None""" + cfg_name = "PasswordAuthentication" + if service_cmd is None: + service_cmd = ["service"] + + if util.is_true(pw_auth): + cfg_val = 'yes' + elif util.is_false(pw_auth): + cfg_val = 'no' + else: + bmsg = "Leaving ssh config '%s' unchanged." % cfg_name + if pw_auth is None or pw_auth.lower() == 'unchanged': + LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth) + else: + LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth) + return + + updated = update_ssh_config({cfg_name: cfg_val}) + if not updated: + LOG.debug("No need to restart ssh service, %s not updated.", cfg_name) + return + + if 'systemctl' in service_cmd: + cmd = list(service_cmd) + ["restart", service_name] + else: + cmd = list(service_cmd) + [service_name, "restart"] + util.subp(cmd) + LOG.debug("Restarted the ssh daemon.") + + def handle(_name, cfg, cloud, log, args): if len(args) != 0: # if run from command line, and give args, wipe the chpasswd['list'] @@ -170,65 +211,9 @@ def handle(_name, cfg, cloud, log, args): if expired_users: log.debug("Expired passwords for: %s users", expired_users) - change_pwauth = False - pw_auth = None - if 'ssh_pwauth' in cfg: - if util.is_true(cfg['ssh_pwauth']): - change_pwauth = True - pw_auth = 'yes' - elif util.is_false(cfg['ssh_pwauth']): - change_pwauth = True - pw_auth = 'no' - elif str(cfg['ssh_pwauth']).lower() == 'unchanged': - log.debug('Leaving auth line unchanged') - change_pwauth = False - elif not str(cfg['ssh_pwauth']).strip(): - log.debug('Leaving auth line unchanged') - change_pwauth = False - elif not cfg['ssh_pwauth']: - log.debug('Leaving auth line unchanged') - change_pwauth = False - else: - msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth'] - util.logexc(log, msg) - - if change_pwauth: - replaced_auth = False - - # See: man sshd_config - old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG) - new_lines = [] - i = 0 - for (i, line) in enumerate(old_lines): - # Keywords are case-insensitive and arguments are case-sensitive - if line.key == 'passwordauthentication': - log.debug("Replacing auth line %s with %s", i + 1, pw_auth) - replaced_auth = True - line.value = 
pw_auth - new_lines.append(line) - - if not replaced_auth: - log.debug("Adding new auth line %s", i + 1) - replaced_auth = True - new_lines.append(ssh_util.SshdConfigLine('', - 'PasswordAuthentication', - pw_auth)) - - lines = [str(l) for l in new_lines] - util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines), - copy_mode=True) - - try: - cmd = cloud.distro.init_cmd # Default service - cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh')) - cmd.append('restart') - if 'systemctl' in cmd: # Switch action ordering - cmd[1], cmd[2] = cmd[2], cmd[1] - cmd = filter(None, cmd) # Remove empty arguments - util.subp(cmd) - log.debug("Restarted the ssh daemon") - except Exception: - util.logexc(log, "Restarting of the ssh daemon failed") + handle_ssh_pwauth( + cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd, + service_name=cloud.distro.get_option('ssh_svcname', 'ssh')) if len(errors): log.debug("%s errors occured, re-raising the last one", len(errors)) diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py new file mode 100644 index 00000000..b051ec82 --- /dev/null +++ b/cloudinit/config/tests/test_set_passwords.py @@ -0,0 +1,71 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import mock + +from cloudinit.config import cc_set_passwords as setpass +from cloudinit.tests.helpers import CiTestCase +from cloudinit import util + +MODPATH = "cloudinit.config.cc_set_passwords." + + +class TestHandleSshPwauth(CiTestCase): + """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth.""" + + with_logs = True + + @mock.patch(MODPATH + "util.subp") + def test_unknown_value_logs_warning(self, m_subp): + setpass.handle_ssh_pwauth("floo") + self.assertIn("Unrecognized value: ssh_pwauth=floo", + self.logs.getvalue()) + m_subp.assert_not_called() + + @mock.patch(MODPATH + "update_ssh_config", return_value=True) + @mock.patch(MODPATH + "util.subp") + def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config): + """If systemctl in service cmd: systemctl restart name.""" + setpass.handle_ssh_pwauth( + True, service_cmd=["systemctl"], service_name="myssh") + self.assertEqual(mock.call(["systemctl", "restart", "myssh"]), + m_subp.call_args) + + @mock.patch(MODPATH + "update_ssh_config", return_value=True) + @mock.patch(MODPATH + "util.subp") + def test_service_as_service_cmd(self, m_subp, m_update_ssh_config): + """If service in service cmd: service name restart.""" + setpass.handle_ssh_pwauth( + True, service_cmd=["service"], service_name="myssh") + self.assertEqual(mock.call(["service", "myssh", "restart"]), + m_subp.call_args) + + @mock.patch(MODPATH + "update_ssh_config", return_value=False) + @mock.patch(MODPATH + "util.subp") + def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config): + """If config is not updated, then no system restart should be done.""" + setpass.handle_ssh_pwauth(True) + m_subp.assert_not_called() + self.assertIn("No need to restart ssh", self.logs.getvalue()) + + @mock.patch(MODPATH + "update_ssh_config", return_value=True) + @mock.patch(MODPATH + "util.subp") + def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config): + """If 'unchanged', then no updates to config and no restart.""" + setpass.handle_ssh_pwauth( + "unchanged", service_cmd=["systemctl"], service_name="myssh") + m_update_ssh_config.assert_not_called() + m_subp.assert_not_called() + + @mock.patch(MODPATH + "util.subp") + def test_valid_change_values(self, m_subp): + """If value is a
valid change value, then update should be called.""" + upname = MODPATH + "update_ssh_config" + optname = "PasswordAuthentication" + for value in util.FALSE_STRINGS + util.TRUE_STRINGS: + optval = "yes" if value in util.TRUE_STRINGS else "no" + with mock.patch(upname, return_value=False) as m_update: + setpass.handle_ssh_pwauth(value) + m_update.assert_called_with({optname: optval}) + m_subp.assert_not_called() + +# vi: ts=4 expandtab diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 882517f5..73c31772 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -279,24 +279,28 @@ class SshdConfigLine(object): def parse_ssh_config(fname): + if not os.path.isfile(fname): + return [] + return parse_ssh_config_lines(util.load_file(fname).splitlines()) + + +def parse_ssh_config_lines(lines): # See: man sshd_config # The file contains keyword-argument pairs, one per line. # Lines starting with '#' and empty lines are interpreted as comments. # Note: key-words are case-insensitive and arguments are case-sensitive - lines = [] - if not os.path.isfile(fname): - return lines - for line in util.load_file(fname).splitlines(): + ret = [] + for line in lines: line = line.strip() if not line or line.startswith("#"): - lines.append(SshdConfigLine(line)) + ret.append(SshdConfigLine(line)) continue try: key, val = line.split(None, 1) except ValueError: key, val = line.split('=', 1) - lines.append(SshdConfigLine(line, key, val)) - return lines + ret.append(SshdConfigLine(line, key, val)) + return ret def parse_ssh_config_map(fname): @@ -310,4 +314,56 @@ def parse_ssh_config_map(fname): ret[line.key] = line.value return ret + +def update_ssh_config(updates, fname=DEF_SSHD_CFG): + """Read fname, and update if changes are necessary. + + @param updates: dictionary of desired values {Option: value} + @return: boolean indicating if an update was done.""" + lines = parse_ssh_config(fname) + changed = update_ssh_config_lines(lines=lines, updates=updates) + if changed: + util.write_file( + fname, "\n".join([str(l) for l in lines]) + "\n", copy_mode=True) + return len(changed) != 0 + + +def update_ssh_config_lines(lines, updates): + """Update the ssh config lines per updates. + + @param lines: array of SshdConfigLine. This array is updated in place.
+ @param updates: dictionary of desired values {Option: value} + @return: A list of keys in updates that were changed.""" + found = set() + changed = [] + + # Keywords are case-insensitive and arguments are case-sensitive + casemap = dict([(k.lower(), k) for k in updates.keys()]) + + for (i, line) in enumerate(lines, start=1): + if not line.key: + continue + if line.key in casemap: + key = casemap[line.key] + value = updates[key] + found.add(key) + if line.value == value: + LOG.debug("line %d: option %s already set to %s", + i, key, value) + else: + changed.append(key) + LOG.debug("line %d: option %s updated %s -> %s", i, + key, line.value, value) + line.value = value + + if len(found) != len(updates): + for key, value in updates.items(): + if key in found: + continue + changed.append(key) + lines.append(SshdConfigLine('', key, value)) + LOG.debug("line %d: option %s added with %s", + len(lines), key, value) + return changed + # vi: ts=4 expandtab diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 4c62c8be..73ae897f 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -4,6 +4,7 @@ from mock import patch from cloudinit import ssh_util from cloudinit.tests import helpers as test_helpers +from cloudinit import util VALID_CONTENT = { @@ -56,7 +57,7 @@ TEST_OPTIONS = ( 'user \"root\".\';echo;sleep 10"') -class TestAuthKeyLineParser(test_helpers.TestCase): +class TestAuthKeyLineParser(test_helpers.CiTestCase): def test_simple_parse(self): # test key line with common 3 fields (keytype, base64, comment) @@ -126,7 +127,7 @@ class TestAuthKeyLineParser(test_helpers.TestCase): self.assertFalse(key.valid()) -class TestUpdateAuthorizedKeys(test_helpers.TestCase): +class TestUpdateAuthorizedKeys(test_helpers.CiTestCase): def test_new_keys_replace(self): """new entries with the same base64 should replace old.""" @@ -168,7 +169,7 @@ class TestUpdateAuthorizedKeys(test_helpers.TestCase): self.assertEqual(expected, found) -class TestParseSSHConfig(test_helpers.TestCase): +class TestParseSSHConfig(test_helpers.CiTestCase): def setUp(self): self.load_file_patch = patch('cloudinit.ssh_util.util.load_file') @@ -235,4 +236,94 @@ class TestParseSSHConfig(test_helpers.TestCase): self.assertEqual('foo', ret[0].key) self.assertEqual('bar', ret[0].value) + +class TestUpdateSshConfigLines(test_helpers.CiTestCase): + """Test the update_ssh_config_lines method.""" + exlines = [ + "#PasswordAuthentication yes", + "UsePAM yes", + "# Comment line", + "AcceptEnv LANG LC_*", + "X11Forwarding no", + ] + pwauth = "PasswordAuthentication" + + def check_line(self, line, opt, val): + self.assertEqual(line.key, opt.lower()) + self.assertEqual(line.value, val) + self.assertIn(opt, str(line)) + self.assertIn(val, str(line)) + + def test_new_option_added(self): + """A single update of non-existing option.""" + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, {'MyKey': 'MyVal'}) + self.assertEqual(['MyKey'], result) + self.check_line(lines[-1], "MyKey", "MyVal") + + def test_commented_out_not_updated_but_appended(self): + """Implementation does not un-comment and update lines.""" + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, {self.pwauth: "no"}) + self.assertEqual([self.pwauth], result) + self.check_line(lines[-1], self.pwauth, "no") + + def test_single_option_updated(self): + """A single update should have change made and line updated.""" + 
opt, val = ("UsePAM", "no") + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, {opt: val}) + self.assertEqual([opt], result) + self.check_line(lines[1], opt, val) + + def test_multiple_updates_with_add(self): + """Verify multiple updates some added some changed, some not.""" + updates = {"UsePAM": "no", "X11Forwarding": "no", "NewOpt": "newval", + "AcceptEnv": "LANG ADD LC_*"} + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, updates) + self.assertEqual(set(["UsePAM", "NewOpt", "AcceptEnv"]), set(result)) + self.check_line(lines[3], "AcceptEnv", updates["AcceptEnv"]) + + def test_return_empty_if_no_changes(self): + """If there are no changes, then return should be empty list.""" + updates = {"UsePAM": "yes"} + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, updates) + self.assertEqual([], result) + self.assertEqual(self.exlines, [str(l) for l in lines]) + + def test_keycase_not_modified(self): + """Original case of key should not be changed on update. + This behavior keeps the original config as intact as possible.""" + updates = {"usepam": "no"} + lines = ssh_util.parse_ssh_config_lines(list(self.exlines)) + result = ssh_util.update_ssh_config_lines(lines, updates) + self.assertEqual(["usepam"], result) + self.assertEqual("UsePAM no", str(lines[1])) + + +class TestUpdateSshConfig(test_helpers.CiTestCase): + cfgdata = '\n'.join(["#Option val", "MyKey ORIG_VAL", ""]) + + def test_modified(self): + mycfg = self.tmp_path("ssh_config_1") + util.write_file(mycfg, self.cfgdata) + ret = ssh_util.update_ssh_config({"MyKey": "NEW_VAL"}, mycfg) + self.assertTrue(ret) + found = util.load_file(mycfg) + self.assertEqual(self.cfgdata.replace("ORIG_VAL", "NEW_VAL"), found) + # assert there is a newline at end of file (LP: #1677205) + self.assertEqual('\n', found[-1]) + + def test_not_modified(self): + mycfg = self.tmp_path("ssh_config_2") + util.write_file(mycfg, self.cfgdata) + with patch("cloudinit.ssh_util.util.write_file") as m_write_file: + ret = ssh_util.update_ssh_config({"MyKey": "ORIG_VAL"}, mycfg) + self.assertFalse(ret) + self.assertEqual(self.cfgdata, util.load_file(mycfg)) + m_write_file.assert_not_called() + + # vi: ts=4 expandtab -- cgit v1.2.3 From 5037252ca5dc54c6d978b23dba063ac5fabc98fa Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 20 Apr 2018 20:25:48 -0400 Subject: schema: in validation, raise ImportError if strict but no jsonschema. validate_cloudconfig_schema with strict=True would not actually validate if there was no jsonschema available. That seems kind of strange. The change here is to make it raise an exception if strict was passed in. And then to fix the one test that needed a skipUnlessJsonSchema wrapper.
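A minimal sketch of the strict behavior this enforces, reusing the schema that cc_runcmd already exports (illustrative, not the verbatim implementation):

    from cloudinit.config.cc_runcmd import schema
    from cloudinit.config.schema import validate_cloudconfig_schema

    # Previously this silently returned when the jsonschema package was
    # missing; with this change a strict caller gets an ImportError
    # instead of a false pass.
    validate_cloudconfig_schema({'runcmd': ['ls /']}, schema, strict=True)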
--- cloudinit/config/tests/test_snap.py | 41 +++++++----------- cloudinit/config/tests/test_ubuntu_advantage.py | 30 +++++++++++++- cloudinit/tests/helpers.py | 19 +++++++++ .../unittests/test_handler/test_handler_bootcmd.py | 42 +++++++++---------- .../unittests/test_handler/test_handler_runcmd.py | 48 +++++++++++----------- 5 files changed, 104 insertions(+), 76 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py index 492d2d46..34c80f1e 100644 --- a/cloudinit/config/tests/test_snap.py +++ b/cloudinit/config/tests/test_snap.py @@ -9,7 +9,7 @@ from cloudinit.config.cc_snap import ( from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util from cloudinit.tests.helpers import ( - CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema) + CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema) SYSTEM_USER_ASSERTION = """\ @@ -245,9 +245,10 @@ class TestRunCommands(CiTestCase): @skipUnlessJsonSchema() -class TestSchema(CiTestCase): +class TestSchema(CiTestCase, SchemaTestCaseMixin): with_logs = True + schema = schema def test_schema_warns_on_snap_not_as_dict(self): """If the snap configuration is not a dict, emit a warning.""" @@ -342,39 +343,27 @@ class TestSchema(CiTestCase): def test_duplicates_are_fine_array_array(self): """Duplicated commands array/array entries are allowed.""" - byebye = ["echo", "bye"] - try: - cfg = {'snap': {'commands': [byebye, byebye]}} - validate_cloudconfig_schema(cfg, schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("command entries can be duplicate.") + self.assertSchemaValid( + {'commands': [["echo", "bye"], ["echo", "bye"]]}, + "command entries can be duplicate.") def test_duplicates_are_fine_array_string(self): """Duplicated commands array/string entries are allowed.""" - byebye = "echo bye" - try: - cfg = {'snap': {'commands': [byebye, byebye]}} - validate_cloudconfig_schema(cfg, schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("command entries can be duplicate.") + self.assertSchemaValid( + {'commands': ["echo bye", "echo bye"]}, + "command entries can be duplicate.") def test_duplicates_are_fine_dict_array(self): """Duplicated commands dict/array entries are allowed.""" - byebye = ["echo", "bye"] - try: - cfg = {'snap': {'commands': {'00': byebye, '01': byebye}}} - validate_cloudconfig_schema(cfg, schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("command entries can be duplicate.") + self.assertSchemaValid( + {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, + "command entries can be duplicate.") def test_duplicates_are_fine_dict_string(self): """Duplicated commands dict/string entries are allowed.""" - byebye = "echo bye" - try: - cfg = {'snap': {'commands': {'00': byebye, '01': byebye}}} - validate_cloudconfig_schema(cfg, schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("command entries can be duplicate.") + self.assertSchemaValid( + {'commands': {'00': "echo bye", '01': "echo bye"}}, + "command entries can be duplicate.") class TestHandle(CiTestCase): diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py index f2a59faf..f1beeff8 100644 --- a/cloudinit/config/tests/test_ubuntu_advantage.py +++ b/cloudinit/config/tests/test_ubuntu_advantage.py @@ -7,7 +7,8 @@ from cloudinit.config.cc_ubuntu_advantage import ( handle, maybe_install_ua_tools, run_commands,
schema) from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema +from cloudinit.tests.helpers import ( + CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) # Module path used in mocks @@ -105,9 +106,10 @@ class TestRunCommands(CiTestCase): @skipUnlessJsonSchema() -class TestSchema(CiTestCase): +class TestSchema(CiTestCase, SchemaTestCaseMixin): with_logs = True + schema = schema def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): """If ubuntu-advantage configuration is not a dict, emit a warning.""" @@ -169,6 +171,30 @@ class TestSchema(CiTestCase): {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) self.assertEqual('', self.logs.getvalue()) + def test_duplicates_are_fine_array_array(self): + """Duplicated commands array/array entries are allowed.""" + self.assertSchemaValid( + {'commands': [["echo", "bye"], ["echo", "bye"]]}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_array_string(self): + """Duplicated commands array/string entries are allowed.""" + self.assertSchemaValid( + {'commands': ["echo bye", "echo bye"]}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_dict_array(self): + """Duplicated commands dict/array entries are allowed.""" + self.assertSchemaValid( + {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, + "command entries can be duplicate.") + + def test_duplicates_are_fine_dict_string(self): + """Duplicated commands dict/string entries are allowed.""" + self.assertSchemaValid( + {'commands': {'00': "echo bye", '01': "echo bye"}}, + "command entries can be duplicate.") + class TestHandle(CiTestCase): diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 5aada6e7..4999f1f6 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -24,6 +24,8 @@ try: except ImportError: from ConfigParser import ConfigParser +from cloudinit.config.schema import ( + SchemaValidationError, validate_cloudconfig_schema) from cloudinit import helpers as ch from cloudinit import util @@ -312,6 +314,23 @@ class HttprettyTestCase(CiTestCase): super(HttprettyTestCase, self).tearDown() +class SchemaTestCaseMixin(unittest2.TestCase): + + def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."): + """Assert the config is valid per self.schema. + + If there is only one top level key in the schema properties, then + the cfg will be put under that key.""" + props = list(self.schema.get('properties')) + # put cfg under top level key if there is only one in the schema + if len(props) == 1: + cfg = {props[0]: cfg} + try: + validate_cloudconfig_schema(cfg, self.schema, strict=True) + except SchemaValidationError: + self.fail(msg) + + def populate_dir(path, files): if not os.path.exists(path): os.makedirs(path) diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py index c3abedde..b1375269 100644 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ b/tests/unittests/test_handler/test_handler_bootcmd.py @@ -1,9 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.config import cc_bootcmd, schema +from cloudinit.config.cc_bootcmd import handle, schema from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) -from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema +from cloudinit.tests.helpers import ( + CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) import logging import tempfile @@ -50,7 +51,7 @@ class TestBootcmd(CiTestCase): """When the provided config doesn't contain bootcmd, skip it.""" cfg = {} mycloud = self._get_cloud('ubuntu') - cc_bootcmd.handle('notimportant', cfg, mycloud, LOG, None) + handle('notimportant', cfg, mycloud, LOG, None) self.assertIn( "Skipping module named notimportant, no 'bootcmd' key", self.logs.getvalue()) @@ -60,7 +61,7 @@ class TestBootcmd(CiTestCase): invalid_config = {'bootcmd': 1} cc = self._get_cloud('ubuntu') with self.assertRaises(TypeError) as context_manager: - cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, []) + handle('cc_bootcmd', invalid_config, cc, LOG, []) self.assertIn('Failed to shellify bootcmd', self.logs.getvalue()) self.assertEqual( "Input to shellify was type 'int'. Expected list or tuple.", @@ -76,7 +77,7 @@ class TestBootcmd(CiTestCase): invalid_config = {'bootcmd': 1} cc = self._get_cloud('ubuntu') with self.assertRaises(TypeError): - cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, []) + handle('cc_bootcmd', invalid_config, cc, LOG, []) self.assertIn( 'Invalid config:\nbootcmd: 1 is not of type \'array\'', self.logs.getvalue()) @@ -93,7 +94,7 @@ class TestBootcmd(CiTestCase): 'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} cc = self._get_cloud('ubuntu') with self.assertRaises(TypeError) as context_manager: - cc_bootcmd.handle('cc_bootcmd', invalid_config, cc, LOG, []) + handle('cc_bootcmd', invalid_config, cc, LOG, []) expected_warnings = [ 'bootcmd.1: 20 is not valid under any of the given schemas', 'bootcmd.3: {\'a\': \'n\'} is not valid under any of the given' @@ -117,7 +118,7 @@ class TestBootcmd(CiTestCase): 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]} with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - cc_bootcmd.handle('cc_bootcmd', valid_config, cc, LOG, []) + handle('cc_bootcmd', valid_config, cc, LOG, []) self.assertEqual(my_id + ' iid-datasource-none\n', util.load_file(out_file)) @@ -128,7 +129,7 @@ class TestBootcmd(CiTestCase): with mock.patch(self._etmpfile_path, FakeExtendedTempFile): with self.assertRaises(util.ProcessExecutionError) as ctxt_manager: - cc_bootcmd.handle('does-not-matter', valid_config, cc, LOG, []) + handle('does-not-matter', valid_config, cc, LOG, []) self.assertIn( 'Unexpected error while running command.\n' "Command: ['/bin/sh',", @@ -138,26 +139,21 @@ class TestBootcmd(CiTestCase): self.logs.getvalue()) -class TestSchema(CiTestCase): +@skipUnlessJsonSchema() +class TestSchema(CiTestCase, SchemaTestCaseMixin): """Directly test schema rather than through handle.""" + schema = schema + def test_duplicates_are_fine_array_array(self): - """Duplicated array entries are allowed.""" - try: - byebye = ["echo", "bye"] - schema.validate_cloudconfig_schema( - {'bootcmd': [byebye, byebye]}, cc_bootcmd.schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("runcmd entries as list are allowed to be duplicates.") + """Duplicated commands array/array entries are allowed.""" + self.assertSchemaValid( + ["byebye", "byebye"], 'command entries can be duplicate') def 
test_duplicates_are_fine_array_string(self): - """Duplicated array entries entries are allowed in values of array.""" - try: - byebye = "echo bye" - schema.validate_cloudconfig_schema( - {'bootcmd': [byebye, byebye]}, cc_bootcmd.schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("runcmd entries are allowed to be duplicates.") + """Duplicated commands array/string entries are allowed.""" + self.assertSchemaValid( + ["echo bye", "echo bye"], "command entries can be duplicate.") # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py index ee1981d1..9ce334ac 100644 --- a/tests/unittests/test_handler/test_handler_runcmd.py +++ b/tests/unittests/test_handler/test_handler_runcmd.py @@ -1,10 +1,11 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.config import cc_runcmd, schema +from cloudinit.config.cc_runcmd import handle, schema from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, skipUnlessJsonSchema) + CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin, + skipUnlessJsonSchema) import logging import os @@ -35,7 +36,7 @@ class TestRuncmd(FilesystemMockingTestCase): """When the provided config doesn't contain runcmd, skip it.""" cfg = {} mycloud = self._get_cloud('ubuntu') - cc_runcmd.handle('notimportant', cfg, mycloud, LOG, None) + handle('notimportant', cfg, mycloud, LOG, None) self.assertIn( "Skipping module named notimportant, no 'runcmd' key", self.logs.getvalue()) @@ -44,7 +45,7 @@ class TestRuncmd(FilesystemMockingTestCase): """Commands which can't be converted to shell will raise errors.""" invalid_config = {'runcmd': 1} cc = self._get_cloud('ubuntu') - cc_runcmd.handle('cc_runcmd', invalid_config, cc, LOG, []) + handle('cc_runcmd', invalid_config, cc, LOG, []) self.assertIn( 'Failed to shellify 1 into file' ' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd', @@ -59,7 +60,7 @@ class TestRuncmd(FilesystemMockingTestCase): """ invalid_config = {'runcmd': 1} cc = self._get_cloud('ubuntu') - cc_runcmd.handle('cc_runcmd', invalid_config, cc, LOG, []) + handle('cc_runcmd', invalid_config, cc, LOG, []) self.assertIn( 'Invalid config:\nruncmd: 1 is not of type \'array\'', self.logs.getvalue()) @@ -75,7 +76,7 @@ class TestRuncmd(FilesystemMockingTestCase): invalid_config = { 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} cc = self._get_cloud('ubuntu') - cc_runcmd.handle('cc_runcmd', invalid_config, cc, LOG, []) + handle('cc_runcmd', invalid_config, cc, LOG, []) expected_warnings = [ 'runcmd.1: 20 is not valid under any of the given schemas', 'runcmd.3: {\'a\': \'n\'} is not valid under any of the given' @@ -90,7 +91,7 @@ class TestRuncmd(FilesystemMockingTestCase): """Valid runcmd schema is written to a runcmd shell script.""" valid_config = {'runcmd': [['ls', '/']]} cc = self._get_cloud('ubuntu') - cc_runcmd.handle('cc_runcmd', valid_config, cc, LOG, []) + handle('cc_runcmd', valid_config, cc, LOG, []) runcmd_file = os.path.join( self.new_root, 'var/lib/cloud/instances/iid-datasource-none/scripts/runcmd') @@ -99,25 +100,22 @@ class TestRuncmd(FilesystemMockingTestCase): self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode)) -class TestSchema(CiTestCase): +@skipUnlessJsonSchema() +class TestSchema(CiTestCase, SchemaTestCaseMixin): """Directly test schema rather than through 
handle.""" - def test_duplicates_are_fine_array(self): - """Duplicated array entries are allowed.""" - try: - byebye = ["echo", "bye"] - schema.validate_cloudconfig_schema( - {'runcmd': [byebye, byebye]}, cc_runcmd.schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("runcmd entries as list are allowed to be duplicates.") - - def test_duplicates_are_fine_string(self): - """Duplicated string entries are allowed.""" - try: - byebye = "echo bye" - schema.validate_cloudconfig_schema( - {'runcmd': [byebye, byebye]}, cc_runcmd.schema, strict=True) - except schema.SchemaValidationError as e: - self.fail("runcmd entries are allowed to be duplicates.") + schema = schema + + def test_duplicates_are_fine_array_array(self): + """Duplicated commands array/array entries are allowed.""" + self.assertSchemaValid( + [["echo", "bye"], ["echo", "bye"]], + "command entries can be duplicate.") + + def test_duplicates_are_fine_array_string(self): + """Duplicated commands array/string entries are allowed.""" + self.assertSchemaValid( + ["echo bye", "echo bye"], + "command entries can be duplicate.") # vi: ts=4 expandtab -- cgit v1.2.3 From 8e111502a0f9d437ff6045f1269c95e5756fc43b Mon Sep 17 00:00:00 2001 From: Mike Gerdts Date: Fri, 20 Apr 2018 20:30:44 -0400 Subject: DataSourceSmartOS: list() should always return a list If customer_metadata has no keys, the KEYS request returns an empty string. Callers of the list() method expect a list to be returned and will give a stack trace if this expectation is not met. LP: #1763480 --- cloudinit/sources/DataSourceSmartOS.py | 6 +++--- tests/unittests/test_datasource/test_smartos.py | 26 +++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 5 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index c8998b40..d4386fec 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -455,9 +455,9 @@ class JoyentMetadataClient(object): def list(self): result = self.request(rtype='KEYS') - if result: - result = result.split('\n') - return result + if not result: + return [] + return result.split('\n') def put(self, key, val): param = b' '.join([base64.b64encode(i.encode()) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 2bea7a17..6fd431f9 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -319,6 +319,12 @@ MOCK_RETURNS = { DMI_DATA_RETURN = 'smartdc' +# Useful for calculating the length of a frame body. A SUCCESS body will be +# followed by more characters or be one character less if SUCCESS with no +# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html. 
+SUCCESS_LEN = len('0123abcd SUCCESS ') +NOTFOUND_LEN = len('0123abcd NOTFOUND') + class PsuedoJoyentClient(object): def __init__(self, data=None): @@ -651,7 +657,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): self.response_parts = { 'command': 'SUCCESS', 'crc': 'b5a9ff00', - 'length': 17 + len(b64e(self.metadata_value)), + 'length': SUCCESS_LEN + len(b64e(self.metadata_value)), 'payload': b64e(self.metadata_value), 'request_id': '{0:08x}'.format(self.request_id), } @@ -787,7 +793,7 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): def test_get_metadata_returns_None_if_value_not_found(self): self.response_parts['payload'] = '' self.response_parts['command'] = 'NOTFOUND' - self.response_parts['length'] = 17 + self.response_parts['length'] = NOTFOUND_LEN client = self._get_client() client._checksum = lambda _: self.response_parts['crc'] self.assertIsNone(client.get('some_key')) @@ -838,6 +844,22 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): client.open_transport() self.assertTrue(reader.emptied) + def test_list_metadata_returns_list(self): + parts = ['foo', 'bar'] + value = b64e('\n'.join(parts)) + self.response_parts['payload'] = value + self.response_parts['crc'] = '40873553' + self.response_parts['length'] = SUCCESS_LEN + len(value) + client = self._get_client() + self.assertEqual(client.list(), parts) + + def test_list_metadata_returns_empty_list_if_no_customer_metadata(self): + del self.response_parts['payload'] + self.response_parts['length'] = SUCCESS_LEN - 1 + self.response_parts['crc'] = '14e563ba' + client = self._get_client() + self.assertEqual(client.list(), []) + class TestNetworkConversion(TestCase): def test_convert_simple(self): -- cgit v1.2.3 From 23479881f51bae7a3f5743ce677ed82317ea8b9f Mon Sep 17 00:00:00 2001 From: Mike Gerdts Date: Fri, 20 Apr 2018 20:56:36 -0400 Subject: DataSourceSmartOS: sdc:hostname is ignored There are three potential sources of the hostname, one of which is documented in SmartOS's vmadm(1M) via the hostname property. That property's value is retrieved via the sdc:hostname key. The other two sources for the hostname are a hostname key in customer_metadata and the VM's uuid (sdc:uuid). Of these three, the sdc:hostname value is not used in a meaningful way by DataSourceSmartOS. This fix changes the fallback mechanism when hostname is not specified in customer_metadata. The order of precedence for setting the hostname is now 1) hostname in customer_metadata, 2) sdc:hostname, then 3) sdc:uuid. LP: #1765085 --- cloudinit/sources/DataSourceSmartOS.py | 10 +++++++-- tests/unittests/test_datasource/test_smartos.py | 28 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index d4386fec..0ef10035 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -11,7 +11,7 @@ # SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests # The meta-data is transmitted via key/value pairs made by # requests on the console. For example, to get the hostname, you -# would send "GET hostname" on /dev/ttyS1. +# would send "GET sdc:hostname" on /dev/ttyS1. # For Linux Guests running in LX-Brand Zones on SmartOS hosts # a socket (/native/.zonecontrol/metadata.sock) is used instead # of a serial console.
@@ -273,8 +273,14 @@ class DataSourceSmartOS(sources.DataSource): write_boot_content(u_data, u_data_f) # Handle the cloud-init regular meta + + # The hostname may or may not be qualified with the local domain name. + # This follows section 3.14 of RFC 2132. if not md['local-hostname']: - md['local-hostname'] = md['instance-id'] + if md['hostname']: + md['local-hostname'] = md['hostname'] + else: + md['local-hostname'] = md['instance-id'] ud = None if md['user-data']: diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 6fd431f9..b926263f 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -437,6 +437,34 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): self.assertEqual(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) + def test_hostname_if_no_sdc_hostname(self): + my_returns = MOCK_RETURNS.copy() + my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(my_returns['hostname'], + dsrc.metadata['local-hostname']) + + def test_sdc_hostname_if_no_hostname(self): + my_returns = MOCK_RETURNS.copy() + my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] + del my_returns['hostname'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(my_returns['sdc:hostname'], + dsrc.metadata['local-hostname']) + + def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self): + my_returns = MOCK_RETURNS.copy() + del my_returns['hostname'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(my_returns['sdc:uuid'], + dsrc.metadata['local-hostname']) + def test_userdata(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() -- cgit v1.2.3 From 4ed164592fe8cb15758cacf3cb3f8c7d5ab7c82e Mon Sep 17 00:00:00 2001 From: Mike Gerdts Date: Mon, 23 Apr 2018 09:29:39 -0400 Subject: DataSourceSmartOS: add locking of serial device. cloud-init and mdata-get each have their own implementation of the SmartOS metadata protocol. If cloud-init and other services that call mdata-get are run concurrently, crosstalk on the serial port can cause them both to become confused. This change makes it so that cloud-init uses the same cooperative locking scheme that's used by mdata-get, thus preventing cross-talk between mdata-get and cloud-init. For testing, a VM running on a SmartOS host and pyserial are required. If the tests are run on a platform other than SmartOS, those that use a real serial port are skipped. pyserial remains commented in requirements.txt because most testers will not be running atop SmartOS. 
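A minimal sketch of the cooperative lock, assuming pyserial provides the open device (illustrative; the patch itself only adds the single fcntl.lockf() call shown in the diff below):

    import fcntl
    import serial

    ser = serial.Serial('/dev/ttyS1', timeout=60)  # KVM guest metadata console
    fcntl.lockf(ser, fcntl.LOCK_EX)  # blocks while mdata-get holds the lock
    try:
        pass  # exchange metadata frames on the locked device
    finally:
        ser.close()  # closing the descriptor releases the POSIX lock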
LP: #1746605 --- cloudinit/sources/DataSourceSmartOS.py | 2 + tests/unittests/test_datasource/test_smartos.py | 67 ++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 0ef10035..4ea00eb1 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -23,6 +23,7 @@ import base64 import binascii import errno +import fcntl import json import os import random @@ -526,6 +527,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient): if not ser.isOpen(): raise SystemError("Unable to open %s" % self.device) self.fp = ser + fcntl.lockf(ser, fcntl.LOCK_EX) self._flush() self._negotiate() diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index b926263f..706e8eb8 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -16,23 +16,27 @@ from __future__ import print_function from binascii import crc32 import json +import multiprocessing import os import os.path import re import shutil +import signal import stat import tempfile +import unittest2 import uuid from cloudinit import serial from cloudinit.sources import DataSourceSmartOS from cloudinit.sources.DataSourceSmartOS import ( - convert_smartos_network_data as convert_net) + convert_smartos_network_data as convert_net, + SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ) import six from cloudinit import helpers as c_helpers -from cloudinit.util import b64e +from cloudinit.util import (b64e, subp) from cloudinit.tests.helpers import mock, FilesystemMockingTestCase, TestCase @@ -1023,4 +1027,63 @@ class TestNetworkConversion(TestCase): found = convert_net(SDC_NICS_SINGLE_GATEWAY) self.assertEqual(expected, found) + +@unittest2.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM, + "Only supported on KVM and bhyve guests under SmartOS") +@unittest2.skipUnless(os.access(SERIAL_DEVICE, os.W_OK), + "Requires write access to " + SERIAL_DEVICE) +class TestSerialConcurrency(TestCase): + """ + This class tests locking on an actual serial port, and as such can only + be run in a kvm or bhyve guest running on a SmartOS host. A test run on + a metadata socket will not be valid because a metadata socket ensures + there is only one session over a connection. In contrast, in the + absence of proper locking multiple processes opening the same serial + port can corrupt each other's exchanges with the metadata server. + """ + def setUp(self): + self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop) + self.mdata_proc.start() + super(TestSerialConcurrency, self).setUp() + + def tearDown(self): + # os.kill() rather than mdata_proc.terminate() to avoid console spam. + os.kill(self.mdata_proc.pid, signal.SIGKILL) + self.mdata_proc.join() + super(TestSerialConcurrency, self).tearDown() + + def start_mdata_loop(self): + """ + The mdata-get command is repeatedly run in a separate process so + that it may try to race with metadata operations performed in the + main test process. Use of mdata-get is better than two processes + using the protocol implementation in DataSourceSmartOS because we + are testing to be sure that cloud-init and mdata-get respect each + other's locks.
+ """ + rcs = list(range(0, 256)) + while True: + subp(['mdata-get', 'sdc:routes'], rcs=rcs) + + def test_all_keys(self): + self.assertIsNotNone(self.mdata_proc.pid) + ds = DataSourceSmartOS + keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()] + keys.extend(ds.SMARTOS_ATTRIB_JSON.values()) + + client = ds.jmc_client_factory() + self.assertIsNotNone(client) + + # The behavior that we are testing for was observed mdata-get running + # 10 times at roughly the same time as cloud-init fetched each key + # once. cloud-init would regularly see failures before making it + # through all keys once. + for _ in range(0, 3): + for key in keys: + # We don't care about the return value, just that it doesn't + # thrown any exceptions. + client.get(key) + + self.assertIsNone(self.mdata_proc.exitcode) + # vi: ts=4 expandtab -- cgit v1.2.3 From a57928d3c314d9568712cd190cb1e721e14c108b Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Wed, 25 Apr 2018 16:18:27 -0400 Subject: sysconfig: dhcp6 subnet type should not imply dhcpv4 BOOTPROTO=dhcp in sysconfig enables DHCPv4 and we should not do this implicitly when 'dhcp6' subnet is specified. In case both dhcpv4 and dhcpv6 are needed users should specify both: subnets: - type: dhcp6 - type: dhcp Fix the current code and add a dhcpv6 only test. Signed-off-by: Vitaly Kuznetsov --- cloudinit/net/sysconfig.py | 1 - tests/unittests/test_net.py | 59 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 58 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 7a7f5093..e53b9f1b 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -287,7 +287,6 @@ class Renderer(renderer.Renderer): if subnet_type == 'dhcp6': iface_cfg['IPV6INIT'] = True iface_cfg['DHCPV6C'] = True - iface_cfg['BOOTPROTO'] = 'dhcp' elif subnet_type in ['dhcp4', 'dhcp']: iface_cfg['BOOTPROTO'] = 'dhcp' elif subnet_type == 'static': diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index c12a487a..84839fd7 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -553,6 +553,43 @@ NETWORK_CONFIGS = { """), }, }, + 'dhcpv6_only': { + 'expected_eni': textwrap.dedent("""\ + auto lo + iface lo inet loopback + + auto iface0 + iface iface0 inet6 dhcp + """).rstrip(' '), + 'expected_netplan': textwrap.dedent(""" + network: + version: 2 + ethernets: + iface0: + dhcp6: true + """).rstrip(' '), + 'yaml': textwrap.dedent("""\ + version: 1 + config: + - type: 'physical' + name: 'iface0' + subnets: + - {'type': 'dhcp6'} + """).rstrip(' '), + 'expected_sysconfig': { + 'ifcfg-iface0': textwrap.dedent("""\ + BOOTPROTO=none + DEVICE=iface0 + DHCPV6C=yes + IPV6INIT=yes + DEVICE=iface0 + NM_CONTROLLED=no + ONBOOT=yes + TYPE=Ethernet + USERCTL=no + """), + }, + }, 'all': { 'expected_eni': ("""\ auto lo @@ -740,7 +777,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true """miimon=100" BONDING_SLAVE0=eth1 BONDING_SLAVE1=eth2 - BOOTPROTO=dhcp + BOOTPROTO=none DEVICE=bond0 DHCPV6C=yes IPV6INIT=yes @@ -1829,6 +1866,12 @@ USERCTL=no self._compare_files_to_expected(entry['expected_sysconfig'], found) self._assert_headers(found) + def test_dhcpv6_only_config(self): + entry = NETWORK_CONFIGS['dhcpv6_only'] + found = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self._compare_files_to_expected(entry['expected_sysconfig'], found) + self._assert_headers(found) + class TestEniNetRendering(CiTestCase): @@ -2277,6 +2320,13 @@ class 
TestNetplanRoundTrip(CiTestCase):
             entry['expected_netplan'].splitlines(),
             files['/etc/netplan/50-cloud-init.yaml'].splitlines())
 
+    def testsimple_render_dhcpv6_only(self):
+        entry = NETWORK_CONFIGS['dhcpv6_only']
+        files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        self.assertEqual(
+            entry['expected_netplan'].splitlines(),
+            files['/etc/netplan/50-cloud-init.yaml'].splitlines())
+
     def testsimple_render_all(self):
         entry = NETWORK_CONFIGS['all']
         files = self._render_and_read(network_config=yaml.load(entry['yaml']))
@@ -2345,6 +2395,13 @@ class TestEniRoundTrip(CiTestCase):
             entry['expected_eni'].splitlines(),
             files['/etc/network/interfaces'].splitlines())
 
+    def testsimple_render_dhcpv6_only(self):
+        entry = NETWORK_CONFIGS['dhcpv6_only']
+        files = self._render_and_read(network_config=yaml.load(entry['yaml']))
+        self.assertEqual(
+            entry['expected_eni'].splitlines(),
+            files['/etc/network/interfaces'].splitlines())
+
     def testsimple_render_v4_and_v6_static(self):
         entry = NETWORK_CONFIGS['v4_and_v6_static']
         files = self._render_and_read(network_config=yaml.load(entry['yaml']))
-- cgit v1.2.3


From 4731c8da25ee9bfbcf0ade1d7ffec95814d8622a Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Thu, 26 Apr 2018 16:35:23 -0400
Subject: net: detect unstable network names and trigger a settle if needed

The cloud-init-local.service expects that any network device name changes
have already been completed by the kernel or udev daemon. In some
situations we've found that the renaming of interfaces from kernel names
(eth0, eth1, etc) to their persistent names (eno1, ens3, enp0s1, etc) may
happen after cloud-init-local has started, where it reads values from
sysfs about what network devices are present and which device to use as a
fallback nic. Subsequently, cloud-init-local would write out network
configuration for a kernel device name which would no longer be present
by the time that networking services start to bring up the devices. The
result is that the instance does not get networking configured. Prior to
use of systemd-networkd, the Ubuntu 'networking.service' unit included a
call to udevadm settle, which is why this race is not seen on a Xenial
system.

This change adds the ability to detect whether an interface has a stable
name; if we find one without a stable name, and stable names have not
been disabled (net.ifnames=0 in /proc/cmdline), then cloud-init will
invoke udevadm settle.

LP: #1766287
---
 cloudinit/config/cc_disk_setup.py | 12 ++----
 cloudinit/net/__init__.py | 26 ++++++++++++
 cloudinit/net/tests/test_init.py | 1 +
 cloudinit/sources/DataSourceAltCloud.py | 5 +--
 cloudinit/tests/test_util.py | 49 ++++++++++++++++++++++
 cloudinit/util.py | 15 +++++++
 tests/unittests/test_net.py | 73 +++++++++++++++++++++++++++++++--
 7 files changed, 165 insertions(+), 16 deletions(-)

(limited to 'tests')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index c3e8c484..943089e0 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -680,13 +680,13 @@ def read_parttbl(device):
     reliable way to probe the partition table.
""" blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] - udevadm_settle() + util.udevadm_settle() try: util.subp(blkdev_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) - udevadm_settle() + util.udevadm_settle() def exec_mkpart_mbr(device, layout): @@ -737,14 +737,10 @@ def exec_mkpart(table_type, device, layout): return get_dyn_func("exec_mkpart_%s", table_type, device, layout) -def udevadm_settle(): - util.subp(['udevadm', 'settle']) - - def assert_and_settle_device(device): """Assert that device exists and settle so it is fully recognized.""" if not os.path.exists(device): - udevadm_settle() + util.udevadm_settle() if not os.path.exists(device): raise RuntimeError("Device %s did not exist and was not created " "with a udevamd settle." % device) @@ -752,7 +748,7 @@ def assert_and_settle_device(device): # Whether or not the device existed above, it is possible that udev # events that would populate udev database (for reading by lsdname) have # not yet finished. So settle again. - udevadm_settle() + util.udevadm_settle() def mkpart(device, definition): diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 80054546..43226bd0 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -107,6 +107,21 @@ def is_bond(devname): return os.path.exists(sys_dev_path(devname, "bonding")) +def is_renamed(devname): + """ + /* interface name assignment types (sysfs name_assign_type attribute) */ + #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */ + #define NET_NAME_ENUM 1 /* enumerated by kernel */ + #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */ + #define NET_NAME_USER 3 /* provided by user-space */ + #define NET_NAME_RENAMED 4 /* renamed by user-space */ + """ + name_assign_type = read_sys_net_safe(devname, 'name_assign_type') + if name_assign_type and name_assign_type in ['3', '4']: + return True + return False + + def is_vlan(devname): uevent = str(read_sys_net_safe(devname, "uevent")) return 'DEVTYPE=vlan' in uevent.splitlines() @@ -180,6 +195,17 @@ def find_fallback_nic(blacklist_drivers=None): if not blacklist_drivers: blacklist_drivers = [] + if 'net.ifnames=0' in util.get_cmdline(): + LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline') + else: + unstable = [device for device in get_devicelist() + if device != 'lo' and not is_renamed(device)] + if len(unstable): + LOG.debug('Found unstable nic names: %s; calling udevadm settle', + unstable) + msg = 'Waiting for udev events to settle' + util.log_time(LOG.debug, msg, func=util.udevadm_settle) + # get list of interfaces that could have connections invalid_interfaces = set(['lo']) potential_interfaces = set([device for device in get_devicelist() diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 276556ee..5c017d15 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -199,6 +199,7 @@ class TestGenerateFallbackConfig(CiTestCase): self.sysdir = self.tmp_dir() + '/' self.m_sys_path.return_value = self.sysdir self.addCleanup(sys_mock.stop) + self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle') def test_generate_fallback_finds_connected_eth_with_mac(self): """generate_fallback_config finds any connected device with a mac.""" diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index e1d0055b..f6e86f34 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ 
-29,7 +29,6 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' # Shell command lists CMD_PROBE_FLOPPY = ['modprobe', 'floppy'] -CMD_UDEVADM_SETTLE = ['udevadm', 'settle', '--timeout=5'] META_DATA_NOT_SUPPORTED = { 'block-device-mapping': {}, @@ -196,9 +195,7 @@ class DataSourceAltCloud(sources.DataSource): # udevadm settle for floppy device try: - cmd = CMD_UDEVADM_SETTLE - cmd.append('--exit-if-exists=' + floppy_dev) - (cmd_out, _err) = util.subp(cmd) + (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5) LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) except ProcessExecutionError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index 76eed076..3c05a437 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -212,4 +212,53 @@ class TestBlkid(CiTestCase): capture=True, decode="replace") +@mock.patch('cloudinit.util.subp') +class TestUdevadmSettle(CiTestCase): + def test_with_no_params(self, m_subp): + """called with no parameters.""" + util.udevadm_settle() + m_subp.called_once_with(mock.call(['udevadm', 'settle'])) + + def test_with_exists_and_not_exists(self, m_subp): + """with exists=file where file does not exist should invoke subp.""" + mydev = self.tmp_path("mydev") + util.udevadm_settle(exists=mydev) + m_subp.called_once_with( + ['udevadm', 'settle', '--exit-if-exists=%s' % mydev]) + + def test_with_exists_and_file_exists(self, m_subp): + """with exists=file where file does exist should not invoke subp.""" + mydev = self.tmp_path("mydev") + util.write_file(mydev, "foo\n") + util.udevadm_settle(exists=mydev) + self.assertIsNone(m_subp.call_args) + + def test_with_timeout_int(self, m_subp): + """timeout can be an integer.""" + timeout = 9 + util.udevadm_settle(timeout=timeout) + m_subp.called_once_with( + ['udevadm', 'settle', '--timeout=%s' % timeout]) + + def test_with_timeout_string(self, m_subp): + """timeout can be a string.""" + timeout = "555" + util.udevadm_settle(timeout=timeout) + m_subp.assert_called_once_with( + ['udevadm', 'settle', '--timeout=%s' % timeout]) + + def test_with_exists_and_timeout(self, m_subp): + """test call with both exists and timeout.""" + mydev = self.tmp_path("mydev") + timeout = "3" + util.udevadm_settle(exists=mydev) + m_subp.called_once_with( + ['udevadm', 'settle', '--exit-if-exists=%s' % mydev, + '--timeout=%s' % timeout]) + + def test_subp_exception_raises_to_caller(self, m_subp): + m_subp.side_effect = util.ProcessExecutionError("BOOM") + self.assertRaises(util.ProcessExecutionError, util.udevadm_settle) + + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 310758dd..2828ca38 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2727,4 +2727,19 @@ def mount_is_read_write(mount_point): mount_opts = result[-1].split(',') return mount_opts[0] == 'rw' + +def udevadm_settle(exists=None, timeout=None): + """Invoke udevadm settle with optional exists and timeout parameters""" + settle_cmd = ["udevadm", "settle"] + if exists: + # skip the settle if the requested path already exists + if os.path.exists(exists): + return + settle_cmd.extend(['--exit-if-exists=%s' % exists]) + if timeout: + settle_cmd.extend(['--timeout=%s' % timeout]) + + return subp(settle_cmd) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 84839fd7..fac82678 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1442,6 +1442,7 @@ 
DEFAULT_DEV_ATTRS = { "address": "07-1C-C6-75-A4-BE", "device/driver": None, "device/device": None, + "name_assign_type": "4", } } @@ -1489,11 +1490,14 @@ class TestGenerateFallbackConfig(CiTestCase): 'eth0': { 'bridge': False, 'carrier': False, 'dormant': False, 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'hv_netsvc', 'device/device': '0x3'}, + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': '4'}, 'eth1': { 'bridge': False, 'carrier': False, 'dormant': False, 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'mlx4_core', 'device/device': '0x7'}, + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + } tmp_dir = self.tmp_dir() @@ -1549,11 +1553,13 @@ iface eth0 inet dhcp 'eth1': { 'bridge': False, 'carrier': False, 'dormant': False, 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'hv_netsvc', 'device/device': '0x3'}, + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': '4'}, 'eth0': { 'bridge': False, 'carrier': False, 'dormant': False, 'operstate': 'down', 'address': '00:11:22:33:44:55', - 'device/driver': 'mlx4_core', 'device/device': '0x7'}, + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, } tmp_dir = self.tmp_dir() @@ -1602,6 +1608,65 @@ iface eth1 inet dhcp ] self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip()) + @mock.patch("cloudinit.util.udevadm_settle") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_unstable_names(self, mock_get_devicelist, mock_read_sys_net, + mock_sys_dev_path, mock_settle): + """verify that udevadm settle is called when we find unstable names""" + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': False}, + 'ens4': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + + } + + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + net.generate_fallback_config(config_driver=True) + self.assertEqual(1, mock_settle.call_count) + + @mock.patch("cloudinit.util.get_cmdline") + @mock.patch("cloudinit.util.udevadm_settle") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_unstable_names_disabled(self, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + mock_settle, m_get_cmdline): + """verify udevadm settle not called when cmdline has net.ifnames=0""" + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3', + 'name_assign_type': False}, + 'ens4': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7', + 'name_assign_type': '4'}, + + } + + m_get_cmdline.return_value = 'net.ifnames=0' + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + 
net.generate_fallback_config(config_driver=True)
+        self.assertEqual(0, mock_settle.call_count)
+
 
 class TestSysConfigRendering(CiTestCase):
-- cgit v1.2.3


From 6ef92c98c3d2b127b05d6708337efc8a81e00071 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 26 Apr 2018 16:24:24 -0500
Subject: IBMCloud: recognize provisioning environment during debug boots.

When images are deployed from template in a production environment
the artifacts of the provisioning stage (provisioningConfiguration.cfg)
that cloud-init referenced are cleaned up. However, when provisioned
in "debug" mode (internal to IBM) the artifacts are left.

This changes the 'is_ibm_provisioning' implementations in both
ds-identify and in the IBM datasource to identify the provisioning
stage more correctly. The change is to consider the boot a provisioning
boot only if the provisioning file exists and either there is no log
file or the log file is older than this boot.

LP: #1767166
---
 cloudinit/sources/DataSourceIBMCloud.py | 42 +++++++++-----
 cloudinit/tests/helpers.py | 13 ++++-
 tests/unittests/test_datasource/test_ibmcloud.py | 50 ++++++++++++++++
 tests/unittests/test_ds_identify.py | 72 +++++++++++++++++++++---
 tools/ds-identify | 21 ++++++-
 5 files changed, 175 insertions(+), 23 deletions(-)

(limited to 'tests')

diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index cfa724bf..01106ec0 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -8,17 +8,11 @@ There are 2 different api exposed launch methods.
 * template: This is the legacy method of launching instances.
    When booting from an image template, the system boots first into
    a "provisioning" mode. There, host <-> guest mechanisms are utilized
-   to execute code in the guest and provision it.
+   to execute code in the guest and configure it. The configuration
+   includes configuring the system network and possibly installing
+   packages and other software stack.
 
-   Cloud-init will disable itself when it detects that it is in the
-   provisioning mode. It detects this by the presence of
-   a file '/root/provisioningConfiguration.cfg'.
-
-   When provided with user-data, the "first boot" will contain a
-   ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data
-   provided, then there is no data-source.
-
-   Cloud-init never does any network configuration in this mode.
+   After the provisioning is finished, the system reboots.
 
 * os_code: Essentially "launch by OS Code" (Operating System Code).
    This is a more modern approach. There is no specific "provisioning" boot.
@@ -200,8 +194,30 @@ def _is_xen():
     return os.path.exists("/proc/xen")
 
 
-def _is_ibm_provisioning():
-    return os.path.exists("/root/provisioningConfiguration.cfg")
+def _is_ibm_provisioning(
+        prov_cfg="/root/provisioningConfiguration.cfg",
+        inst_log="/root/swinstall.log",
+        boot_ref="/proc/1/environ"):
+    """Return boolean indicating if this boot is ibm provisioning boot."""
+    if os.path.exists(prov_cfg):
+        msg = "config '%s' exists." % prov_cfg
+        result = True
+        if os.path.exists(inst_log):
+            if os.path.exists(boot_ref):
+                result = (os.stat(inst_log).st_mtime >
+                          os.stat(boot_ref).st_mtime)
+                msg += (" log '%s' from %s boot." %
+                        (inst_log, "current" if result else "previous"))
+            else:
+                msg += (" log '%s' existed, but no reference file '%s'." %
+                        (inst_log, boot_ref))
+                result = False
+        else:
+            msg += " log '%s' did not exist." % inst_log
+    else:
+        result, msg = (False, "config '%s' did not exist."
% prov_cfg) + LOG.debug("ibm_provisioning=%s: %s", result, msg) + return result def get_ibm_platform(): @@ -251,7 +267,7 @@ def get_ibm_platform(): else: return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) elif _is_ibm_provisioning(): - return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) + return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) return not_found diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 4999f1f6..117a9cfe 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -8,6 +8,7 @@ import os import shutil import sys import tempfile +import time import unittest import mock @@ -263,7 +264,8 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): os.path: [('isfile', 1), ('exists', 1), ('islink', 1), ('isdir', 1), ('lexists', 1)], os: [('listdir', 1), ('mkdir', 1), - ('lstat', 1), ('symlink', 2)] + ('lstat', 1), ('symlink', 2), + ('stat', 1)] } if hasattr(os, 'scandir'): @@ -349,6 +351,15 @@ def populate_dir(path, files): return ret +def populate_dir_with_ts(path, data): + """data is {'file': ('contents', mtime)}. mtime relative to now.""" + populate_dir(path, dict((k, v[0]) for k, v in data.items())) + btime = time.time() + for fpath, (_contents, mtime) in data.items(): + ts = btime + mtime if mtime else btime + os.utime(os.path.sep.join((path, fpath)), (ts, ts)) + + def dir2dict(startdir, prefix=None): flist = {} if prefix is None: diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/test_datasource/test_ibmcloud.py index 621cfe49..e639ae47 100644 --- a/tests/unittests/test_datasource/test_ibmcloud.py +++ b/tests/unittests/test_datasource/test_ibmcloud.py @@ -259,4 +259,54 @@ class TestReadMD(test_helpers.CiTestCase): ret['metadata']) +class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): + """Test the _is_ibm_provisioning method.""" + inst_log = "/root/swinstall.log" + prov_cfg = "/root/provisioningConfiguration.cfg" + boot_ref = "/proc/1/environ" + with_logs = True + + def _call_with_root(self, rootd): + self.reRoot(rootd) + return ibm._is_ibm_provisioning() + + def test_no_config(self): + """No provisioning config means not provisioning.""" + self.assertFalse(self._call_with_root(self.tmp_dir())) + + def test_config_only(self): + """A provisioning config without a log means provisioning.""" + rootd = self.tmp_dir() + test_helpers.populate_dir(rootd, {self.prov_cfg: "key=value"}) + self.assertTrue(self._call_with_root(rootd)) + + def test_config_with_old_log(self): + """A config with a log from previous boot is not provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", -30), + self.boot_ref: ("PWD=/", 0)} + test_helpers.populate_dir_with_ts(rootd, data) + self.assertFalse(self._call_with_root(rootd=rootd)) + self.assertIn("from previous boot", self.logs.getvalue()) + + def test_config_with_new_log(self): + """A config with a log from this boot is provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", 30), + self.boot_ref: ("PWD=/", 0)} + test_helpers.populate_dir_with_ts(rootd, data) + self.assertTrue(self._call_with_root(rootd=rootd)) + self.assertIn("from current boot", self.logs.getvalue()) + + def test_config_and_log_no_reference(self): + """If the config and log existed, but no reference, assume not.""" + rootd = self.tmp_dir() + test_helpers.populate_dir( + rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}) + 
self.assertFalse(self._call_with_root(rootd=rootd)) + self.assertIn("no reference file", self.logs.getvalue()) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 53643989..ad7fe41e 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +from collections import namedtuple import copy import os from uuid import uuid4 @@ -7,7 +8,7 @@ from uuid import uuid4 from cloudinit import safeyaml from cloudinit import util from cloudinit.tests.helpers import ( - CiTestCase, dir2dict, populate_dir) + CiTestCase, dir2dict, populate_dir, populate_dir_with_ts) from cloudinit.sources import DataSourceIBMCloud as dsibm @@ -66,7 +67,6 @@ P_SYS_VENDOR = "sys/class/dmi/id/sys_vendor" P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" -IBM_PROVISIONING_CHECK_PATH = "/root/provisioningConfiguration.cfg" IBM_CONFIG_UUID = "9796-932E" MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} @@ -74,11 +74,17 @@ MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0} MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0} +shell_true = 0 +shell_false = 1 -class TestDsIdentify(CiTestCase): +CallReturn = namedtuple('CallReturn', + ['rc', 'stdout', 'stderr', 'cfg', 'files']) + + +class DsIdentifyBase(CiTestCase): dsid_path = os.path.realpath('tools/ds-identify') - def call(self, rootd=None, mocks=None, args=None, files=None, + def call(self, rootd=None, mocks=None, func="main", args=None, files=None, policy_dmi=DI_DEFAULT_POLICY, policy_no_dmi=DI_DEFAULT_POLICY_NO_DMI, ec2_strict_id=DI_EC2_STRICT_ID_DEFAULT): @@ -135,7 +141,7 @@ class TestDsIdentify(CiTestCase): mocklines.append(write_mock(d)) endlines = [ - 'main %s' % ' '.join(['"%s"' % s for s in args]) + func + ' ' + ' '.join(['"%s"' % s for s in args]) ] with open(wrap, "w") as fp: @@ -159,7 +165,7 @@ class TestDsIdentify(CiTestCase): cfg = {"_INVALID_YAML": contents, "_EXCEPTION": str(e)} - return rc, out, err, cfg, dir2dict(rootd) + return CallReturn(rc, out, err, cfg, dir2dict(rootd)) def _call_via_dict(self, data, rootd=None, **kwargs): # return output of self.call with a dict input like VALID_CFG[item] @@ -190,6 +196,8 @@ class TestDsIdentify(CiTestCase): _print_run_output(rc, out, err, cfg, files) return rc, out, err, cfg, files + +class TestDsIdentify(DsIdentifyBase): def test_wb_print_variables(self): """_print_info reports an array of discovered variables to stderr.""" data = VALID_CFG['Azure-dmi-detection'] @@ -250,7 +258,10 @@ class TestDsIdentify(CiTestCase): Template provisioning with user-data has METADATA disk, datasource should return not found.""" data = copy.deepcopy(VALID_CFG['IBMCloud-metadata']) - data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + # change the 'is_ibm_provisioning' mock to return 1 (false) + isprov_m = [m for m in data['mocks'] + if m["name"] == "is_ibm_provisioning"][0] + isprov_m['ret'] = shell_true return self._check_via_dict(data, RC_NOT_FOUND) def test_ibmcloud_template_userdata(self): @@ -265,7 +276,8 @@ class TestDsIdentify(CiTestCase): no disks attached. 
Datasource should return not found.""" data = copy.deepcopy(VALID_CFG['IBMCloud-nodisks']) - data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + data['mocks'].append( + {'name': 'is_ibm_provisioning', 'ret': shell_true}) return self._check_via_dict(data, RC_NOT_FOUND) def test_ibmcloud_template_no_userdata(self): @@ -446,6 +458,47 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('Hetzner') +class TestIsIBMProvisioning(DsIdentifyBase): + """Test the is_ibm_provisioning method in ds-identify.""" + + inst_log = "/root/swinstall.log" + prov_cfg = "/root/provisioningConfiguration.cfg" + boot_ref = "/proc/1/environ" + funcname = "is_ibm_provisioning" + + def test_no_config(self): + """No provisioning config means not provisioning.""" + ret = self.call(files={}, func=self.funcname) + self.assertEqual(shell_false, ret.rc) + + def test_config_only(self): + """A provisioning config without a log means provisioning.""" + ret = self.call(files={self.prov_cfg: "key=value"}, func=self.funcname) + self.assertEqual(shell_true, ret.rc) + + def test_config_with_old_log(self): + """A config with a log from previous boot is not provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", -30), + self.boot_ref: ("PWD=/", 0)} + populate_dir_with_ts(rootd, data) + ret = self.call(rootd=rootd, func=self.funcname) + self.assertEqual(shell_false, ret.rc) + self.assertIn("from previous boot", ret.stderr) + + def test_config_with_new_log(self): + """A config with a log from this boot is provisioning.""" + rootd = self.tmp_dir() + data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), + self.inst_log: ("log data\n", 30), + self.boot_ref: ("PWD=/", 0)} + populate_dir_with_ts(rootd, data) + ret = self.call(rootd=rootd, func=self.funcname) + self.assertEqual(shell_true, ret.rc) + self.assertIn("from current boot", ret.stderr) + + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" if disks is None: @@ -639,6 +692,7 @@ VALID_CFG = { 'ds': 'IBMCloud', 'mocks': [ MOCK_VIRT_IS_XEN, + {'name': 'is_ibm_provisioning', 'ret': shell_false}, {'name': 'blkid', 'ret': 0, 'out': blkid_out( [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, @@ -652,6 +706,7 @@ VALID_CFG = { 'ds': 'IBMCloud', 'mocks': [ MOCK_VIRT_IS_XEN, + {'name': 'is_ibm_provisioning', 'ret': shell_false}, {'name': 'blkid', 'ret': 0, 'out': blkid_out( [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(), @@ -669,6 +724,7 @@ VALID_CFG = { 'ds': 'IBMCloud', 'mocks': [ MOCK_VIRT_IS_XEN, + {'name': 'is_ibm_provisioning', 'ret': shell_false}, {'name': 'blkid', 'ret': 0, 'out': blkid_out( [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, diff --git a/tools/ds-identify b/tools/ds-identify index 9a2db5c4..7fff5d1e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -125,6 +125,7 @@ DI_ON_NOTFOUND="" DI_EC2_STRICT_ID_DEFAULT="true" _IS_IBM_CLOUD="" +_IS_IBM_PROVISIONING="" error() { set -- "ERROR:" "$@"; @@ -1006,7 +1007,25 @@ dscheck_Hetzner() { } is_ibm_provisioning() { - [ -f "${PATH_ROOT}/root/provisioningConfiguration.cfg" ] + local pcfg="${PATH_ROOT}/root/provisioningConfiguration.cfg" + local logf="${PATH_ROOT}/root/swinstall.log" + local is_prov=false msg="config '$pcfg' did not exist." + if [ -f "$pcfg" ]; then + msg="config '$pcfg' exists." + is_prov=true + if [ -f "$logf" ]; then + if [ "$logf" -nt "$PATH_PROC_1_ENVIRON" ]; then + msg="$msg log '$logf' from current boot." 
+ else + is_prov=false + msg="$msg log '$logf' from previous boot." + fi + else + msg="$msg log '$logf' did not exist." + fi + fi + debug 2 "ibm_provisioning=$is_prov: $msg" + [ "$is_prov" = "true" ] } is_ibm_cloud() { -- cgit v1.2.3 From 11172924a48a47a7231d19d9cefe628dfddda8bf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 30 Apr 2018 13:21:51 -0600 Subject: IBMCloud: Disable config-drive and nocloud only if IBMCloud is enabled. Ubuntu images on IBMCloud for 16.04 have some seed data in /var/lib/cloud/data/seed/nocloud-net. In order to have systems with IBMCloud enabled, we modified ds-identify detection to skip that seed if the system was on IBMCloud. That change did not consider the fact that IBMCloud might not be in the datasource list. There was similar logic in the ConfigDrive datasource in ds-identify and the datasource itself. Config drive is now updated to only check and avoid IBMCloud if IBMCloud is enabled. The check in ds-identify for nocloud was dropped. If a user provides a nocloud seed on IBMCloud, then that can be used. This means that systems running Xenial will continue to get their old datasources. LP: #1766401 --- cloudinit/sources/DataSourceConfigDrive.py | 11 +++-- tests/unittests/test_ds_identify.py | 77 +++++++++++++++++++++++++++--- tools/ds-identify | 17 +++++-- 3 files changed, 91 insertions(+), 14 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index c7b5fe5f..121cf215 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): util.logexc(LOG, "Failed reading config drive from %s", sdir) if not found: - for dev in find_candidate_devs(): + dslist = self.sys_cfg.get('datasource_list') + for dev in find_candidate_devs(dslist=dslist): try: # Set mtype if freebsd and turn off sync if dev.startswith("/dev/cd"): @@ -211,7 +212,7 @@ def write_injected_files(files): util.logexc(LOG, "Failed writing file: %s", filename) -def find_candidate_devs(probe_optical=True): +def find_candidate_devs(probe_optical=True, dslist=None): """Return a list of devices that may contain the config drive. The returned list is sorted by search order where the first item has @@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True): * either vfat or iso9660 formated * labeled with 'config-2' or 'CONFIG-2' """ + if dslist is None: + dslist = [] + # query optical drive to get it in blkid cache for 2.6 kernels if probe_optical: for device in OPTICAL_DEVICES: @@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True): devices = [d for d in candidates if d in by_label or not util.is_partition(d)] - if devices: + LOG.debug("devices=%s dslist=%s", devices, dslist) + if devices and "IBMCloud" in dslist: # IBMCloud uses config-2 label, but limited to a single UUID. 
ibm_platform, ibm_path = get_ibm_platform() if ibm_path in devices: diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index ad7fe41e..4d8a4360 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -184,17 +184,18 @@ class DsIdentifyBase(CiTestCase): data, RC_FOUND, dslist=[data.get('ds'), DS_NONE]) def _check_via_dict(self, data, rc, dslist=None, **kwargs): - found_rc, out, err, cfg, files = self._call_via_dict(data, **kwargs) + ret = self._call_via_dict(data, **kwargs) good = False try: - self.assertEqual(rc, found_rc) + self.assertEqual(rc, ret.rc) if dslist is not None: - self.assertEqual(dslist, cfg['datasource_list']) + self.assertEqual(dslist, ret.cfg['datasource_list']) good = True finally: if not good: - _print_run_output(rc, out, err, cfg, files) - return rc, out, err, cfg, files + _print_run_output(ret.rc, ret.stdout, ret.stderr, ret.cfg, + ret.files) + return ret class TestDsIdentify(DsIdentifyBase): @@ -245,13 +246,40 @@ class TestDsIdentify(DsIdentifyBase): def test_config_drive(self): """ConfigDrive datasource has a disk with LABEL=config-2.""" self._test_ds_found('ConfigDrive') - return def test_config_drive_upper(self): """ConfigDrive datasource has a disk with LABEL=CONFIG-2.""" self._test_ds_found('ConfigDriveUpper') return + def test_config_drive_seed(self): + """Config Drive seed directory.""" + self._test_ds_found('ConfigDrive-seed') + + def test_config_drive_interacts_with_ibmcloud_config_disk(self): + """Verify ConfigDrive interaction with IBMCloud. + + If ConfigDrive is enabled and not IBMCloud, then ConfigDrive + should claim the ibmcloud 'config-2' disk. + If IBMCloud is enabled, then ConfigDrive should skip.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) + files = data.get('files', {}) + if not files: + data['files'] = files + cfgpath = 'etc/cloud/cloud.cfg.d/99_networklayer_common.cfg' + + # with list including IBMCloud, config drive should be not found. + files[cfgpath] = 'datasource_list: [ ConfigDrive, IBMCloud ]\n' + ret = self._check_via_dict(data, shell_true) + self.assertEqual( + ret.cfg.get('datasource_list'), ['IBMCloud', 'None']) + + # But if IBMCloud is not enabled, config drive should claim this. + files[cfgpath] = 'datasource_list: [ ConfigDrive, NoCloud ]\n' + ret = self._check_via_dict(data, shell_true) + self.assertEqual( + ret.cfg.get('datasource_list'), ['ConfigDrive', 'None']) + def test_ibmcloud_template_userdata_in_provisioning(self): """Template provisioned with user-data during provisioning stage. @@ -307,6 +335,37 @@ class TestDsIdentify(DsIdentifyBase): self._check_via_dict( data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE]) + def test_ibmcloud_with_nocloud_seed(self): + """NoCloud seed should be preferred over IBMCloud. + + A nocloud seed should be preferred over IBMCloud even if enabled. + Ubuntu 16.04 images have /seed/nocloud-net. LP: #1766401.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) + files = data.get('files', {}) + if not files: + data['files'] = files + files.update(VALID_CFG['NoCloud-seed']['files']) + ret = self._check_via_dict(data, shell_true) + self.assertEqual( + ['NoCloud', 'IBMCloud', 'None'], + ret.cfg.get('datasource_list')) + + def test_ibmcloud_with_configdrive_seed(self): + """ConfigDrive seed should be preferred over IBMCloud. + + A ConfigDrive seed should be preferred over IBMCloud even if enabled. + Ubuntu 16.04 images have a fstab entry that mounts the + METADATA disk into /seed/config_drive. 
LP: ##1766401.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) + files = data.get('files', {}) + if not files: + data['files'] = files + files.update(VALID_CFG['ConfigDrive-seed']['files']) + ret = self._check_via_dict(data, shell_true) + self.assertEqual( + ['ConfigDrive', 'IBMCloud', 'None'], + ret.cfg.get('datasource_list')) + def test_policy_disabled(self): """A Builtin policy of 'disabled' should return not found. @@ -684,6 +743,12 @@ VALID_CFG = { }, ], }, + 'ConfigDrive-seed': { + 'ds': 'ConfigDrive', + 'files': { + os.path.join(P_SEED_DIR, 'config_drive', 'openstack', + 'latest', 'meta_data.json'): 'md\n'}, + }, 'Hetzner': { 'ds': 'Hetzner', 'files': {P_SYS_VENDOR: 'Hetzner\n'}, diff --git a/tools/ds-identify b/tools/ds-identify index 7fff5d1e..9f0d96f7 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -601,7 +601,6 @@ dscheck_NoCloud() { *\ ds=nocloud*) return ${DS_FOUND};; esac - is_ibm_cloud && return ${DS_NOT_FOUND} for d in nocloud nocloud-net; do check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} @@ -612,11 +611,12 @@ dscheck_NoCloud() { return ${DS_NOT_FOUND} } +is_ds_enabled() { + local name="$1" pad=" ${DI_DSLIST} " + [ "${pad#* $name }" != "${pad}" ] +} + check_configdrive_v2() { - is_ibm_cloud && return ${DS_NOT_FOUND} - if has_fs_with_label CONFIG-2 config-2; then - return ${DS_FOUND} - fi # look in /config-drive /seed/config_drive for a directory # openstack/YYYY-MM-DD format with a file meta_data.json local d="" @@ -631,6 +631,13 @@ check_configdrive_v2() { debug 1 "config drive seeded directory had only 'latest'" return ${DS_FOUND} fi + + is_ds_enabled "IBMCloud" + debug 1 "is_ds_enabled returned $?: $DI_DSLIST" + is_ds_enabled "IBMCloud" && is_ibm_cloud && return ${DS_NOT_FOUND} + if has_fs_with_label CONFIG-2 config-2; then + return ${DS_FOUND} + fi return ${DS_NOT_FOUND} } -- cgit v1.2.3 From 14cb4924a6cf191107f9c04698ace2753eb44d2b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 1 May 2018 14:18:18 -0600 Subject: netinfo: fix netdev_pformat when a nic does not have an address assigned. The last set of changes to netdev_pformat ended up dropping the output of devices that were not up. This adds back the 'down' interfaces to the rendered output. 
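In rough terms the rendering rule becomes: one table row per address,
plus a single placeholder row for any device with no ipv4 or ipv6
addresses at all, so 'down' nics still appear. A condensed sketch of that
logic (assuming a netdev_info()-shaped dict and a PrettyTable-like tbl;
the real code is in the diff below):

    empty = "."
    for dev, data in sorted(netdev.items()):
        rows = [(dev, data["up"], a["ip"], a["mask"],
                 a.get("scope", empty), data["hwaddr"])
                for a in data.get("ipv4", [])]
        rows += [(dev, data["up"], a["ip"], empty, a["scope6"],
                  data["hwaddr"])
                 for a in data.get("ipv6", [])]
        if not rows:
            # no addresses assigned: still emit one placeholder row
            rows = [(dev, data["up"], empty, empty, empty, data["hwaddr"])]
        for row in rows:
            tbl.add_row(row)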
LP: #1766302 --- cloudinit/netinfo.py | 40 +++++++++++++++----- cloudinit/tests/test_netinfo.py | 47 +++++++++++++++++++++++- tests/data/netinfo/netdev-formatted-output-down | 8 ++++ tests/data/netinfo/new-ifconfig-output-down | 15 ++++++++ tests/data/netinfo/sample-ipaddrshow-output-down | 8 ++++ 5 files changed, 107 insertions(+), 11 deletions(-) create mode 100644 tests/data/netinfo/netdev-formatted-output-down create mode 100644 tests/data/netinfo/new-ifconfig-output-down create mode 100644 tests/data/netinfo/sample-ipaddrshow-output-down (limited to 'tests') diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index f0906160..1be76fe7 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -158,12 +158,28 @@ def netdev_info(empty=""): LOG.warning( "Could not print networks: missing 'ip' and 'ifconfig' commands") - if empty != "": - for (_devname, dev) in devs.items(): - for field in dev: - if dev[field] == "": - dev[field] = empty + if empty == "": + return devs + + recurse_types = (dict, tuple, list) + + def fill(data, new_val="", empty_vals=("", b"")): + """Recursively replace 'empty_vals' in data (dict, tuple, list) + with new_val""" + if isinstance(data, dict): + myiter = data.items() + elif isinstance(data, (tuple, list)): + myiter = enumerate(data) + else: + raise TypeError("Unexpected input to fill") + + for key, val in myiter: + if val in empty_vals: + data[key] = new_val + elif isinstance(val, recurse_types): + fill(val, new_val) + fill(devs, new_val=empty) return devs @@ -353,8 +369,9 @@ def getgateway(): def netdev_pformat(): lines = [] + empty = "." try: - netdev = netdev_info(empty=".") + netdev = netdev_info(empty=empty) except Exception as e: lines.append( util.center( @@ -368,12 +385,15 @@ def netdev_pformat(): for (dev, data) in sorted(netdev.items()): for addr in data.get('ipv4'): tbl.add_row( - [dev, data["up"], addr["ip"], addr["mask"], - addr.get('scope', '.'), data["hwaddr"]]) + (dev, data["up"], addr["ip"], addr["mask"], + addr.get('scope', empty), data["hwaddr"])) for addr in data.get('ipv6'): tbl.add_row( - [dev, data["up"], addr["ip"], ".", addr["scope6"], - data["hwaddr"]]) + (dev, data["up"], addr["ip"], empty, addr["scope6"], + data["hwaddr"])) + if len(data.get('ipv6')) + len(data.get('ipv4')) == 0: + tbl.add_row((dev, data["up"], empty, empty, empty, + data["hwaddr"])) netdev_s = tbl.get_string() max_len = len(max(netdev_s.splitlines(), key=len)) header = util.center("Net device info", "+", max_len) diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py index 2537c1c2..d76e768e 100644 --- a/cloudinit/tests/test_netinfo.py +++ b/cloudinit/tests/test_netinfo.py @@ -4,7 +4,7 @@ from copy import copy -from cloudinit.netinfo import netdev_pformat, route_pformat +from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat from cloudinit.tests.helpers import CiTestCase, mock, readResource @@ -71,6 +71,51 @@ class TestNetInfo(CiTestCase): self.logs.getvalue()) m_subp.assert_not_called() + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_info_nettools_down(self, m_subp, m_which): + """test netdev_info using nettools and down interfaces.""" + m_subp.return_value = ( + readResource("netinfo/new-ifconfig-output-down"), "") + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + self.assertEqual( + {'eth0': {'ipv4': [], 'ipv6': [], + 'hwaddr': '00:16:3e:de:51:a6', 'up': False}, + 'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}], + 'ipv6': 
[{'ip': '::1/128', 'scope6': 'host'}], + 'hwaddr': '.', 'up': True}}, + netdev_info(".")) + + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_info_iproute_down(self, m_subp, m_which): + """Test netdev_info with ip and down interfaces.""" + m_subp.return_value = ( + readResource("netinfo/sample-ipaddrshow-output-down"), "") + m_which.side_effect = lambda x: x if x == 'ip' else None + self.assertEqual( + {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.', + 'mask': '255.0.0.0', 'scope': 'host'}], + 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], + 'hwaddr': '.', 'up': True}, + 'eth0': {'ipv4': [], 'ipv6': [], + 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}, + netdev_info(".")) + + @mock.patch('cloudinit.netinfo.netdev_info') + def test_netdev_pformat_with_down(self, m_netdev_info): + """test netdev_pformat when netdev_info returns 'down' interfaces.""" + m_netdev_info.return_value = ( + {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0', + 'scope': 'host'}], + 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}], + 'hwaddr': '.', 'up': True}, + 'eth0': {'ipv4': [], 'ipv6': [], + 'hwaddr': '00:16:3e:de:51:a6', 'up': False}}) + self.assertEqual( + readResource("netinfo/netdev-formatted-output-down"), + netdev_pformat()) + @mock.patch('cloudinit.netinfo.util.which') @mock.patch('cloudinit.netinfo.util.subp') def test_route_nettools_pformat(self, m_subp, m_which): diff --git a/tests/data/netinfo/netdev-formatted-output-down b/tests/data/netinfo/netdev-formatted-output-down new file mode 100644 index 00000000..038dfb4d --- /dev/null +++ b/tests/data/netinfo/netdev-formatted-output-down @@ -0,0 +1,8 @@ ++++++++++++++++++++++++++++Net device info++++++++++++++++++++++++++++ ++--------+-------+-----------+-----------+-------+-------------------+ +| Device | Up | Address | Mask | Scope | Hw-Address | ++--------+-------+-----------+-----------+-------+-------------------+ +| eth0 | False | . | . | . | 00:16:3e:de:51:a6 | +| lo | True | 127.0.0.1 | 255.0.0.0 | host | . | +| lo | True | ::1/128 | . | host | . 
| ++--------+-------+-----------+-----------+-------+-------------------+ diff --git a/tests/data/netinfo/new-ifconfig-output-down b/tests/data/netinfo/new-ifconfig-output-down new file mode 100644 index 00000000..5d12e352 --- /dev/null +++ b/tests/data/netinfo/new-ifconfig-output-down @@ -0,0 +1,15 @@ +eth0: flags=4098 mtu 1500 + ether 00:16:3e:de:51:a6 txqueuelen 1000 (Ethernet) + RX packets 126229 bytes 158139342 (158.1 MB) + RX errors 0 dropped 0 overruns 0 frame 0 + TX packets 59317 bytes 4839008 (4.8 MB) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 + +lo: flags=73 mtu 65536 + inet 127.0.0.1 netmask 255.0.0.0 + inet6 ::1 prefixlen 128 scopeid 0x10 + loop txqueuelen 1000 (Local Loopback) + RX packets 260 bytes 20092 (20.0 KB) + RX errors 0 dropped 0 overruns 0 frame 0 + TX packets 260 bytes 20092 (20.0 KB) + TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0 diff --git a/tests/data/netinfo/sample-ipaddrshow-output-down b/tests/data/netinfo/sample-ipaddrshow-output-down new file mode 100644 index 00000000..cb516d64 --- /dev/null +++ b/tests/data/netinfo/sample-ipaddrshow-output-down @@ -0,0 +1,8 @@ +1: lo: mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000 + link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + inet 127.0.0.1/8 scope host lo + valid_lft forever preferred_lft forever + inet6 ::1/128 scope host + valid_lft forever preferred_lft forever +44: eth0@if45: mtu 1500 qdisc noqueue state DOWN group default qlen 1000 + link/ether 00:16:3e:de:51:a6 brd ff:ff:ff:ff:ff:ff link-netnsid 0 -- cgit v1.2.3 From fed07fc6cddebcbc3e744926509c362d9e200aeb Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Tue, 1 May 2018 15:38:51 -0600 Subject: FreeBSD: Invoke growfs on ufs filesystems such that it does not prompt. By default, FreeBSD's growfs runs interactively asking a question which can be mitigated using the '-y' command line option. The fix here is simply to pass -y to growfs to avoid the prompt. LP: #1404745 --- cloudinit/config/cc_resizefs.py | 2 +- tests/unittests/test_handler/test_handler_resizefs.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 82f29e10..2edddd0c 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth): def _resize_ufs(mount_point, devpth): - return ('growfs', devpth) + return ('growfs', '-y', devpth) def _resize_zfs(mount_point, devpth): diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py index 7a7ba1ff..f92175fd 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -147,7 +147,7 @@ class TestResizefs(CiTestCase): def test_resize_ufs_cmd_return(self): mount_point = '/' devpth = '/dev/sda2' - self.assertEqual(('growfs', devpth), + self.assertEqual(('growfs', '-y', devpth), _resize_ufs(mount_point, devpth)) @mock.patch('cloudinit.util.get_mount_info') -- cgit v1.2.3 From aae494c39f4c6f625e7409ca262e657d085dd5d1 Mon Sep 17 00:00:00 2001 From: Joshua Chan Date: Thu, 3 May 2018 14:50:16 -0600 Subject: azure: Add reported ready marker file. This change is for Azure VM Preprovisioning. 
A bug was found where, after an Azure VM reports ready the first time,
while the VM is polling indefinitely for the new ovf-env.xml from the
Instance Metadata Service (IMDS), a reboot would cause us to send another
report-ready signal to the fabric, which deletes the reprovisioning data
on the node.

The marker file added here fixes that issue: we only send a report-ready
signal to the fabric when no marker file is present, and we create the
marker file when the signal is sent. When a reboot does occur, we check
whether the marker file has been created and use that to decide whether
to send the report-ready signal again.

LP: #1765214
---
 cloudinit/sources/DataSourceAzure.py | 21 +++-
 tests/unittests/test_datasource/test_azure.py | 170 ++++++++++++++++++--------
 2 files changed, 134 insertions(+), 57 deletions(-)

(limited to 'tests')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index a71197a6..1b03d460 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4'
 # DMI chassis-asset-tag is set static for all azure instances
 AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
 REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
+REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
 IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
 
 
@@ -436,11 +437,12 @@ class DataSourceAzure(sources.DataSource):
             LOG.debug("negotiating already done for %s",
                       self.get_instance_id())
 
-    def _poll_imds(self, report_ready=True):
+    def _poll_imds(self):
         """Poll IMDS for the new provisioning data until we get a valid
         response. Then return the returned JSON object."""
         url = IMDS_URL + "?api-version=2017-04-02"
         headers = {"Metadata": "true"}
+        report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
         LOG.debug("Start polling IMDS")
 
         def exc_cb(msg, exception):
@@ -450,13 +452,17 @@ class DataSourceAzure(sources.DataSource):
             # call DHCP and setup the ephemeral network to acquire the new IP.
             return False
 
-        need_report = report_ready
         while True:
             try:
                 with EphemeralDHCPv4() as lease:
-                    if need_report:
+                    if report_ready:
+                        path = REPORTED_READY_MARKER_FILE
+                        LOG.info(
+                            "Creating a marker file to report ready: %s", path)
+                        util.write_file(path, "{pid}: {time}\n".format(
+                            pid=os.getpid(), time=time()))
                         self._report_ready(lease=lease)
-                        need_report = False
+                        report_ready = False
                     return readurl(url, timeout=1, headers=headers,
                                    exception_cb=exc_cb, infinite=True).contents
             except UrlError:
@@ -490,8 +496,10 @@ class DataSourceAzure(sources.DataSource):
             if (cfg.get('PreprovisionedVm') is True or
                     os.path.isfile(path)):
                 if not os.path.isfile(path):
-                    LOG.info("Creating a marker file to poll imds")
-                    util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
+                    LOG.info("Creating a marker file to poll imds: %s",
+                             path)
+                    util.write_file(path, "{pid}: {time}\n".format(
+                        pid=os.getpid(), time=time()))
                 return True
         return False
 
@@ -526,6 +534,7 @@ class DataSourceAzure(sources.DataSource):
                 "Error communicating with Azure fabric; You may experience."
"connectivity issues.", exc_info=True) return False + util.del_file(REPORTED_READY_MARKER_FILE) util.del_file(REPROVISION_MARKER_FILE) return fabric_data diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 88fe76c7..26e8d7d3 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1125,19 +1125,9 @@ class TestAzureNetExists(CiTestCase): self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) -@mock.patch('cloudinit.sources.DataSourceAzure.util.subp') -@mock.patch.object(dsaz, 'get_hostname') -@mock.patch.object(dsaz, 'set_hostname') -class TestAzureDataSourcePreprovisioning(CiTestCase): - - def setUp(self): - super(TestAzureDataSourcePreprovisioning, self).setUp() - tmp = self.tmp_dir() - self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) - self.paths = helpers.Paths({'cloud_dir': tmp}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d +class TestPreprovisioningReadAzureOvfFlag(CiTestCase): - def test_read_azure_ovf_with_true_flag(self, *args): + def test_read_azure_ovf_with_true_flag(self): """The read_azure_ovf method should set the PreprovisionedVM cfg flag if the proper setting is present.""" content = construct_valid_ovf_env( @@ -1146,7 +1136,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): cfg = ret[2] self.assertTrue(cfg['PreprovisionedVm']) - def test_read_azure_ovf_with_false_flag(self, *args): + def test_read_azure_ovf_with_false_flag(self): """The read_azure_ovf method should set the PreprovisionedVM cfg flag to false if the proper setting is false.""" content = construct_valid_ovf_env( @@ -1155,7 +1145,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): cfg = ret[2] self.assertFalse(cfg['PreprovisionedVm']) - def test_read_azure_ovf_without_flag(self, *args): + def test_read_azure_ovf_without_flag(self): """The read_azure_ovf method should not set the PreprovisionedVM cfg flag.""" content = construct_valid_ovf_env() @@ -1163,12 +1153,121 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): cfg = ret[2] self.assertFalse(cfg['PreprovisionedVm']) - @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - @mock.patch('requests.Session.request') + +@mock.patch('os.path.isfile') +class TestPreprovisioningShouldReprovision(CiTestCase): + + def setUp(self): + super(TestPreprovisioningShouldReprovision, self).setUp() + tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) + self.paths = helpers.Paths({'cloud_dir': tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + def test__should_reprovision_with_true_cfg(self, isfile, write_f): + """The _should_reprovision method should return true with config + flag present.""" + isfile.return_value = False + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {'PreprovisionedVm': True}, None))) + + def test__should_reprovision_with_file_existing(self, isfile): + """The _should_reprovision method should return True if the sentinal + exists.""" + isfile.return_value = True + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertTrue(dsa._should_reprovision( + (None, None, {'preprovisionedvm': False}, None))) + + def test__should_reprovision_returns_false(self, isfile): + """The 
_should_reprovision method should return False + if config and sentinal are not present.""" + isfile.return_value = False + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + self.assertFalse(dsa._should_reprovision((None, None, {}, None))) + + @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') + def test_reprovision_calls__poll_imds(self, _poll_imds, isfile): + """_reprovision will poll IMDS.""" + isfile.return_value = False + hostname = "myhost" + username = "myuser" + odata = {'HostName': hostname, 'UserName': username} + _poll_imds.return_value = construct_valid_ovf_env(data=odata) + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + dsa._reprovision() + _poll_imds.assert_called_with() + + +@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') +@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') +@mock.patch('requests.Session.request') +@mock.patch( + 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') +class TestPreprovisioningPollIMDS(CiTestCase): + + def setUp(self): + super(TestPreprovisioningPollIMDS, self).setUp() + self.tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp) + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + def test_poll_imds_calls_report_ready(self, write_f, report_ready_func, + fake_resp, m_dhcp, m_net): + """The poll_imds will call report_ready after creating marker file.""" + report_marker = self.tmp_path('report_marker', self.tmp) + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + m_dhcp.return_value = [lease] + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + mock_path = ( + 'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE') + with mock.patch(mock_path, report_marker): + dsa._poll_imds() + self.assertEqual(report_ready_func.call_count, 1) + report_ready_func.assert_called_with(lease=lease) + + def test_poll_imds_report_ready_false(self, report_ready_func, + fake_resp, m_dhcp, m_net): + """The poll_imds should not call reporting ready + when flag is false""" + report_marker = self.tmp_path('report_marker', self.tmp) + write_file(report_marker, content='dont run report_ready :)') + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'}] + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + mock_path = ( + 'cloudinit.sources.DataSourceAzure.REPORTED_READY_MARKER_FILE') + with mock.patch(mock_path, report_marker): + dsa._poll_imds() + self.assertEqual(report_ready_func.call_count, 0) + + +@mock.patch('cloudinit.sources.DataSourceAzure.util.subp') +@mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') +@mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') +@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') +@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') +@mock.patch('requests.Session.request') +class TestAzureDataSourcePreprovisioning(CiTestCase): + + def setUp(self): + super(TestAzureDataSourcePreprovisioning, self).setUp() + tmp = self.tmp_dir() + self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) + self.paths = helpers.Paths({'cloud_dir': tmp}) + dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + def 
test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net, - m_is_bsd, *args): + m_is_bsd, write_f, subp): """The _poll_imds method should return the ovf_env.xml.""" m_is_bsd.return_value = False m_dhcp.return_value = [{ @@ -1194,12 +1293,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): prefix_or_mask='255.255.255.0', router='192.168.2.1') self.assertEqual(m_net.call_count, 1) - @mock.patch('cloudinit.sources.DataSourceAzure.util.is_FreeBSD') - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - @mock.patch('requests.Session.request') def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net, - m_is_bsd, *args): + m_is_bsd, write_f, subp): """The _reprovision method should call poll IMDS.""" m_is_bsd.return_value = False m_dhcp.return_value = [{ @@ -1231,32 +1326,5 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): prefix_or_mask='255.255.255.0', router='192.168.2.1') self.assertEqual(m_net.call_count, 1) - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch('os.path.isfile') - def test__should_reprovision_with_true_cfg(self, isfile, write_f, *args): - """The _should_reprovision method should return true with config - flag present.""" - isfile.return_value = False - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - self.assertTrue(dsa._should_reprovision( - (None, None, {'PreprovisionedVm': True}, None))) - - @mock.patch('os.path.isfile') - def test__should_reprovision_with_file_existing(self, isfile, *args): - """The _should_reprovision method should return True if the sentinal - exists.""" - isfile.return_value = True - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - self.assertTrue(dsa._should_reprovision( - (None, None, {'preprovisionedvm': False}, None))) - - @mock.patch('os.path.isfile') - def test__should_reprovision_returns_false(self, isfile, *args): - """The _should_reprovision method should return False - if config and sentinal are not present.""" - isfile.return_value = False - dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - self.assertFalse(dsa._should_reprovision((None, None, {}, None))) - # vi: ts=4 expandtab -- cgit v1.2.3 From 80dfb3b023a268d6d6204220665c2cf43eac66df Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 4 May 2018 11:18:22 -0600 Subject: pycodestyle: Fix deprecated string literals, move away from flake8. Fix remaining pycodestyle warnings related to invalid string literals introduced in more recent pycodestyle versions https://bugs.python.org/issue27364 . Also stop using flake8 in tox as it is incompatible with newer versions of pyflakes. Instead we now add tox environments for pycodestyle and pyflakes individually. Set the versions in both pycodestyle and pyflakes to the currently available versions.
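As background on the warning class this commit clears (not part of the patch): since Python 3.6, an unrecognized escape such as \S in a plain string literal triggers DeprecationWarning at compile time (SyntaxWarning on newer interpreters), and pycodestyle 2.4.0 reports it as W605. A minimal sketch, reusing the netinfo.py pattern fixed in the diff below:

    import re
    import warnings

    # Promote warnings to errors so the compile-time warning is visible.
    warnings.simplefilter("error")

    # "\S" in a plain string literal is an invalid string escape; compiling
    # such source triggers the warning that pycodestyle flags as W605.
    try:
        compile(r'pat = ".*<(\S+)>"', '<example>', 'exec')
    except (DeprecationWarning, SyntaxWarning) as exc:
        print('caught:', exc)

    # The fix applied throughout the tree: a raw string literal, which the
    # regex engine reads identically but the compiler accepts silently.
    pat = re.compile(r'.*<(\S+)>')
    print(pat.match('scopeid <link>').group(1))  # prints: link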
--- cloudinit/netinfo.py | 2 +- tests/unittests/test_handler/test_handler_ntp.py | 2 +- tox.ini | 16 +++++++++------- 3 files changed, 11 insertions(+), 9 deletions(-) (limited to 'tests') diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 1be76fe7..9ff929c2 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -138,7 +138,7 @@ def _netdev_info_ifconfig(ifconfig_data): elif toks[i].startswith("scope:"): devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:") elif toks[i] == "scopeid": - res = re.match(".*<(\S+)>", toks[i + 1]) + res = re.match(r'.*<(\S+)>', toks[i + 1]) if res: devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) return devs diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 17c53559..6da4564e 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -706,7 +706,7 @@ class TestSupplementalSchemaValidation(CiTestCase): cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '', 'template': 'asdf', 'template_name': None, 'packages': 'NOPE'} match = (r'Invalid ntp configuration:\\nExpected a list of required' - ' package names for ntp:config:packages. Found \(NOPE\)') + ' package names for ntp:config:packages. Found \\(NOPE\\)') with self.assertRaisesRegex(ValueError, match): cc_ntp.supplemental_schema_validation(cfg) diff --git a/tox.ini b/tox.ini index 818ade3d..2fb3209d 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py27, py3, flake8, xenial, pylint +envlist = py27, py3, xenial, pycodestyle, pyflakes, pylint recreate = True [testenv] @@ -7,14 +7,11 @@ commands = python -m nose {posargs:tests/unittests cloudinit} setenv = LC_ALL = en_US.utf-8 -[testenv:flake8] +[testenv:pycodestyle] basepython = python3 deps = - pycodestyle==2.3.1 - pyflakes==1.5.0 - flake8==3.3.0 - hacking==0.13.0 -commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/} + pycodestyle==2.4.0 +commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/} # https://github.com/gabrielfalcao/HTTPretty/issues/223 setenv = @@ -118,6 +115,11 @@ deps = commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/} deps = pycodestyle +[testenv:pyflakes] +commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} +deps = + pyflakes==1.6.0 + [testenv:tip-pyflakes] commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} deps = pyflakes -- cgit v1.2.3 From 323eb30940cae2069daf74517089220fccc4afb9 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 9 May 2018 09:36:56 -0600 Subject: tests: fix package and ca_cert cloud_tests on bionic package_update_upgrade_install was failing as htop is now included in Bionic images. Switch this test to install 'sl' instead. ca_certs integration test fails on cert_count test because update-ca-certificates on bionic generates fewer symlinks for a given cert. Integration tests now collect dpkg-query --show output on every instance. Add a new assertPackageInstalled helper method which finds the package or package version installed on the instance. Adapt existing byobu, package_update_upgrade_install, ntp and salt_minion tests to use assertPackageInstalled method.
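The new assertPackageInstalled helper (added to base.py in the diff below) reduces to a multiline regex search over that collected dpkg-query output. A standalone sketch of the matching logic, with made-up sample data and an illustrative function name:

    import re

    # dpkg-query --show emits tab-separated "package<TAB>version" lines;
    # the versions below are invented for the example.
    PKG_OUT = 'byobu\t5.125-0ubuntu1\nsl\t3.03-17build2\ntree\t1.7.0-5\n'

    def installed_version(pkg_out, name):
        """Return the installed version of name, or None if absent."""
        match = re.search(
            r'^%s\t(?P<version>.*)$' % re.escape(name), pkg_out,
            re.MULTILINE)
        return match.group('version') if match else None

    assert installed_version(PKG_OUT, 'sl') == '3.03-17build2'
    assert installed_version(PKG_OUT, 'sl').startswith('3.03')  # partial
    assert installed_version(PKG_OUT, 'htop') is None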
LP: #1769985 --- tests/cloud_tests/testcases.yaml | 4 ++-- tests/cloud_tests/testcases/base.py | 21 +++++++++++++++++++++ tests/cloud_tests/testcases/modules/byobu.py | 3 +-- tests/cloud_tests/testcases/modules/byobu.yaml | 3 --- tests/cloud_tests/testcases/modules/ca_certs.py | 21 +++++++++++++++++---- tests/cloud_tests/testcases/modules/ca_certs.yaml | 8 ++++++-- tests/cloud_tests/testcases/modules/ntp.py | 5 ++--- .../modules/package_update_upgrade_install.py | 14 ++++++-------- .../modules/package_update_upgrade_install.yaml | 9 +++------ tests/cloud_tests/testcases/modules/salt_minion.py | 3 +-- .../cloud_tests/testcases/modules/salt_minion.yaml | 3 --- 11 files changed, 59 insertions(+), 35 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml index a3e29900..a16d1ddf 100644 --- a/tests/cloud_tests/testcases.yaml +++ b/tests/cloud_tests/testcases.yaml @@ -24,9 +24,9 @@ base_test_data: status.json: | #!/bin/sh cat /run/cloud-init/status.json - cloud-init-version: | + package-versions: | #!/bin/sh - dpkg-query -W -f='${Version}' cloud-init + dpkg-query --show system.journal.gz: | #!/bin/sh [ -d /run/systemd ] || { echo "not systemd."; exit 0; } diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 0d1916b4..696db8dd 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -31,6 +31,27 @@ class CloudTestCase(unittest.TestCase): def is_distro(self, distro_name): return self.os_cfg['os'] == distro_name + def assertPackageInstalled(self, name, version=None): + """Check dpkg-query --show output for matching package name. + + @param name: package base name + @param version: string representing a package version or part of a + version. + """ + pkg_out = self.get_data_file('package-versions') + pkg_match = re.search( + '^%s\t(?P<version>.*)$' % name, pkg_out, re.MULTILINE) + if pkg_match: + installed_version = pkg_match.group('version') + if not version: + return # Success + if installed_version.startswith(version): + return # Success + raise AssertionError( + 'Expected package version %s-%s not found. Found %s' % + name, version, installed_version) + raise AssertionError('Package not installed: %s' % name) + def os_version_cmp(self, cmp_version): """Compare the version of the test to comparison_version.
diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py index 005ca014..74d0529a 100644 --- a/tests/cloud_tests/testcases/modules/byobu.py +++ b/tests/cloud_tests/testcases/modules/byobu.py @@ -9,8 +9,7 @@ class TestByobu(base.CloudTestCase): def test_byobu_installed(self): """Test byobu installed.""" - out = self.get_data_file('byobu_installed') - self.assertIn('/usr/bin/byobu', out) + self.assertPackageInstalled('byobu') def test_byobu_profile_enabled(self): """Test byobu profile.d file exists.""" diff --git a/tests/cloud_tests/testcases/modules/byobu.yaml b/tests/cloud_tests/testcases/modules/byobu.yaml index a9aa1f3f..d002a611 100644 --- a/tests/cloud_tests/testcases/modules/byobu.yaml +++ b/tests/cloud_tests/testcases/modules/byobu.yaml @@ -7,9 +7,6 @@ cloud_config: | #cloud-config byobu_by_default: enable collect_scripts: - byobu_installed: | - #!/bin/bash - which byobu byobu_profile_enabled: | #!/bin/bash ls /etc/profile.d/Z97-byobu.sh diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py index e75f0413..6b56f639 100644 --- a/tests/cloud_tests/testcases/modules/ca_certs.py +++ b/tests/cloud_tests/testcases/modules/ca_certs.py @@ -7,10 +7,23 @@ from tests.cloud_tests.testcases import base class TestCaCerts(base.CloudTestCase): """Test ca certs module.""" - def test_cert_count(self): - """Test the count is proper.""" - out = self.get_data_file('cert_count') - self.assertEqual(5, int(out)) + def test_certs_updated(self): + """Test certs have been updated in /etc/ssl/certs.""" + out = self.get_data_file('cert_links') + # Bionic update-ca-certificates creates less links debian #895075 + unlinked_files = [] + links = {} + for cert_line in out.splitlines(): + if '->' in cert_line: + fname, _sep, link = cert_line.split() + links[fname] = link + else: + unlinked_files.append(cert_line) + self.assertEqual(['ca-certificates.crt'], unlinked_files) + self.assertEqual('cloud-init-ca-certs.pem', links['a535c1f3.0']) + self.assertEqual( + '/usr/share/ca-certificates/cloud-init-ca-certs.crt', + links['cloud-init-ca-certs.pem']) def test_cert_installed(self): """Test line from our cert exists.""" diff --git a/tests/cloud_tests/testcases/modules/ca_certs.yaml b/tests/cloud_tests/testcases/modules/ca_certs.yaml index d939f435..2cd91551 100644 --- a/tests/cloud_tests/testcases/modules/ca_certs.yaml +++ b/tests/cloud_tests/testcases/modules/ca_certs.yaml @@ -43,9 +43,13 @@ cloud_config: | DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== -----END CERTIFICATE----- collect_scripts: - cert_count: | + cert_links: | #!/bin/bash - ls -l /etc/ssl/certs | wc -l + # links printed -> + # non-links printed + for file in `ls /etc/ssl/certs`; do + [ -h /etc/ssl/certs/$file ] && echo -n $file ' -> ' && readlink /etc/ssl/certs/$file || echo $file; + done cert: | #!/bin/bash md5sum /etc/ssl/certs/ca-certificates.crt diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py index b50e52fe..c63cc15e 100644 --- a/tests/cloud_tests/testcases/modules/ntp.py +++ b/tests/cloud_tests/testcases/modules/ntp.py @@ -9,15 +9,14 @@ class TestNtp(base.CloudTestCase): def test_ntp_installed(self): """Test ntp installed""" - out = self.get_data_file('ntp_installed') - self.assertEqual(0, int(out)) + self.assertPackageInstalled('ntp') def test_ntp_dist_entries(self): """Test dist config file is empty""" out = self.get_data_file('ntp_conf_dist_empty') self.assertEqual(0, int(out)) 
- def test_ntp_entires(self): + def test_ntp_entries(self): """Test config entries""" out = self.get_data_file('ntp_conf_pool_list') self.assertIn('pool.ntp.org iburst', out) diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py index a92dec22..fecad768 100644 --- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py +++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py @@ -7,15 +7,13 @@ from tests.cloud_tests.testcases import base class TestPackageInstallUpdateUpgrade(base.CloudTestCase): """Test package install update upgrade module.""" - def test_installed_htop(self): - """Test htop got installed.""" - out = self.get_data_file('dpkg_htop') - self.assertEqual(1, int(out)) + def test_installed_sl(self): + """Test sl got installed.""" + self.assertPackageInstalled('sl') def test_installed_tree(self): """Test tree got installed.""" - out = self.get_data_file('dpkg_tree') - self.assertEqual(1, int(out)) + self.assertPackageInstalled('tree') def test_apt_history(self): """Test apt history for update command.""" @@ -23,13 +21,13 @@ class TestPackageInstallUpdateUpgrade(base.CloudTestCase): self.assertIn( 'Commandline: /usr/bin/apt-get --option=Dpkg::Options' '::=--force-confold --option=Dpkg::options::=--force-unsafe-io ' - '--assume-yes --quiet install htop tree', out) + '--assume-yes --quiet install sl tree', out) def test_cloud_init_output(self): """Test cloud-init-output for install & upgrade stuff.""" out = self.get_data_file('cloud-init-output.log') self.assertIn('Setting up tree (', out) - self.assertIn('Setting up htop (', out) + self.assertIn('Setting up sl (', out) self.assertIn('Reading package lists...', out) self.assertIn('Building dependency tree...', out) self.assertIn('Reading state information...', out) diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml index 71d24b83..dd79e438 100644 --- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml +++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml @@ -15,7 +15,7 @@ required_features: cloud_config: | #cloud-config packages: - - htop + - sl - tree package_update: true package_upgrade: true @@ -23,11 +23,8 @@ collect_scripts: apt_history_cmdline: | #!/bin/bash grep ^Commandline: /var/log/apt/history.log - dpkg_htop: | + dpkg_show: | #!/bin/bash - dpkg -l | grep htop | wc -l - dpkg_tree: | - #!/bin/bash - dpkg -l | grep tree | wc -l + dpkg-query --show # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py index 70917a4c..fc9688ed 100644 --- a/tests/cloud_tests/testcases/modules/salt_minion.py +++ b/tests/cloud_tests/testcases/modules/salt_minion.py @@ -33,7 +33,6 @@ class Test(base.CloudTestCase): def test_minion_installed(self): """Test if the salt-minion package is installed""" - out = self.get_data_file('minion_installed') - self.assertEqual(1, int(out)) + self.assertPackageInstalled('salt-minion') # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml index f20b9765..c24aa177 100644 --- a/tests/cloud_tests/testcases/modules/salt_minion.yaml +++ b/tests/cloud_tests/testcases/modules/salt_minion.yaml @@ -35,8 +35,5 @@ collect_scripts: grains: | #!/bin/bash cat /etc/salt/grains 
- minion_installed: | - #!/bin/bash - dpkg -l | grep salt-minion | grep ii | wc -l # vi: ts=4 expandtab -- cgit v1.2.3 From 23a84d2ce4ec44a0a0c2edbc3b1948c59bb0afbb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 9 May 2018 17:40:56 -0600 Subject: SmartOS: fix get_interfaces for nics that do not have addr_assign_type. When attempting to apply network configuration for SmartOS's container platform, cloud-init would not identify nics. The nics provided in this container service do not have 'addr_assign_type'. That was being interpreted as being a "stolen" mac, and would be filtered out by get_interfaces. --- cloudinit/net/__init__.py | 8 ++++++-- tests/unittests/test_net.py | 46 +++++++++++++++++++++++++++++++++++++------ 2 files changed, 46 insertions(+), 8 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 43226bd0..3ffde52c 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -359,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False): 1: randomly generated 3: set using dev_set_mac_address""" assign_type = read_sys_net_int(ifname, "addr_assign_type") - if strict and assign_type is None: - raise ValueError("%s had no addr_assign_type.") + if assign_type is None: + # None is returned if this nic had no 'addr_assign_type' entry. + # if strict, raise an error, if not return True. + if strict: + raise ValueError("%s had no addr_assign_type.") + return True return assign_type in (0, 1, 3) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index fac82678..31807e13 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2,12 +2,9 @@ from cloudinit import net from cloudinit.net import cmdline -from cloudinit.net import eni -from cloudinit.net import natural_sort_key -from cloudinit.net import netplan -from cloudinit.net import network_state -from cloudinit.net import renderers -from cloudinit.net import sysconfig +from cloudinit.net import ( + eni, interface_has_own_mac, natural_sort_key, netplan, network_state, + renderers, sysconfig) from cloudinit.sources.helpers import openstack from cloudinit import temp_utils from cloudinit import util @@ -2691,6 +2688,43 @@ class TestGetInterfaces(CiTestCase): any_order=True) +class TestInterfaceHasOwnMac(CiTestCase): + """Test interface_has_own_mac. This is admittedly a bit whitebox.""" + + @mock.patch('cloudinit.net.read_sys_net_int', return_value=None) + def test_non_strict_with_no_addr_assign_type(self, m_read_sys_net_int): + """If nic does not have addr_assign_type, it is not "stolen". + + SmartOS containers do not provide the addr_assign_type in /sys. + + $ ( cd /sys/class/net/eth0/ && grep -r . 
*) + address:90:b8:d0:20:e1:b0 + addr_len:6 + flags:0x1043 + ifindex:2 + mtu:1500 + tx_queue_len:1 + type:1 + """ + self.assertTrue(interface_has_own_mac("eth0")) + + @mock.patch('cloudinit.net.read_sys_net_int', return_value=None) + def test_strict_with_no_addr_assign_type_raises(self, m_read_sys_net_int): + with self.assertRaises(ValueError): + interface_has_own_mac("eth0", True) + + @mock.patch('cloudinit.net.read_sys_net_int') + def test_expected_values(self, m_read_sys_net_int): + msg = "address_assign_type=%d said to not have own mac" + for address_assign_type in (0, 1, 3): + m_read_sys_net_int.return_value = address_assign_type + self.assertTrue( + interface_has_own_mac("eth0", msg % address_assign_type)) + + m_read_sys_net_int.return_value = 2 + self.assertFalse(interface_has_own_mac("eth0")) + + class TestGetInterfacesByMac(CiTestCase): _data = {'bonds': ['bond1'], 'bridges': ['bridge1'], -- cgit v1.2.3 From 3b712fcea9ca685c5cb761ea19c5126acf8ffaa1 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Wed, 9 May 2018 20:36:51 -0600 Subject: tests: do not rely on host /proc/cmdline in test_net.py Make test_net.TestGenerateFallbackConfig.test_unstable_names mock the value of /proc/cmdline in the same way as the existing test_unstable_names_disabled test. LP: #1769952 --- tests/unittests/test_net.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 31807e13..e13ca3ce 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1605,12 +1605,13 @@ iface eth1 inet dhcp ] self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip()) + @mock.patch("cloudinit.util.get_cmdline") @mock.patch("cloudinit.util.udevadm_settle") @mock.patch("cloudinit.net.sys_dev_path") @mock.patch("cloudinit.net.read_sys_net") @mock.patch("cloudinit.net.get_devicelist") def test_unstable_names(self, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path, mock_settle): + mock_sys_dev_path, mock_settle, m_get_cmdline): """verify that udevadm settle is called when we find unstable names""" devices = { 'eth0': { @@ -1626,6 +1627,7 @@ iface eth1 inet dhcp } + m_get_cmdline.return_value = '' tmp_dir = self.tmp_dir() _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, mock_sys_dev_path, -- cgit v1.2.3 From 0d7ee5592621d09699d079945ffd6febf16669b2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 15 May 2018 13:38:20 -0400 Subject: ds-identify: recognize container-other as a container, test SmartOS. In playing with a SmartOS container I found that ds-identify did not identify the container there as a container. Systemd-detect-virt identifies it as 'container-other'. Also here are tests for ds-identify for the SmartOS platform identification, and some indentation fixes in ds-identify. 
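For reference, the Python half of that platform check (tightened in the DataSourceSmartOS.py hunk below) comes down to two exact-match tests. A minimal standalone sketch; the constant values and the directly passed product_name are illustrative stand-ins, since the real datasource pulls the product name from DMI:

    import platform

    SMARTOS_ENV_LX_BRAND = 'lx-brand'
    SMARTOS_ENV_KVM = 'kvm'

    def smartos_environ(uname_version=None, product_name=None):
        # lx-brand containers report 'BrandZ virtual linux' as the kernel
        # version; SmartOS VMs expose a DMI product name starting 'SmartDC'.
        if uname_version is None:
            uname_version = platform.uname().version
        if uname_version == 'BrandZ virtual linux':
            return SMARTOS_ENV_LX_BRAND
        if product_name and product_name.startswith('SmartDC'):
            return SMARTOS_ENV_KVM
        return None

    print(smartos_environ(uname_version='BrandZ virtual linux'))  # lx-brand
    print(smartos_environ(uname_version='4.4.0', product_name='SmartDC HVM'))

ds-identify mirrors the same two markers in shell, adding the metadata-socket existence check shown below to avoid misdetecting lxd containers nested on a BrandZ host.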
--- cloudinit/sources/DataSourceSmartOS.py | 4 +-- tests/unittests/test_ds_identify.py | 51 ++++++++++++++++++++++++++++++++-- tools/ds-identify | 12 ++++---- 3 files changed, 57 insertions(+), 10 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 4ea00eb1..fcb46b14 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -745,7 +745,7 @@ def get_smartos_environ(uname_version=None, product_name=None): # report 'BrandZ virtual linux' as the kernel version if uname_version is None: uname_version = uname[3] - if uname_version.lower() == 'brandz virtual linux': + if uname_version == 'BrandZ virtual linux': return SMARTOS_ENV_LX_BRAND if product_name is None: @@ -753,7 +753,7 @@ def get_smartos_environ(uname_version=None, product_name=None): else: system_type = product_name - if system_type and 'smartdc' in system_type.lower(): + if system_type and system_type.startswith('SmartDC'): return SMARTOS_ENV_KVM return None diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 4d8a4360..7f12be59 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -10,7 +10,8 @@ from cloudinit import util from cloudinit.tests.helpers import ( CiTestCase, dir2dict, populate_dir, populate_dir_with_ts) -from cloudinit.sources import DataSourceIBMCloud as dsibm +from cloudinit.sources import DataSourceIBMCloud as ds_ibm +from cloudinit.sources import DataSourceSmartOS as ds_smartos UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") @@ -69,8 +70,12 @@ P_DSID_CFG = "etc/cloud/ds-identify.cfg" IBM_CONFIG_UUID = "9796-932E" +MOCK_VIRT_IS_CONTAINER_OTHER = {'name': 'detect_virt', + 'RET': 'container-other', 'ret': 0} MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} +# currenty' SmartOS hypervisor "bhyve" is unknown by systemd-detect-virt. 
+MOCK_VIRT_IS_VM_OTHER = {'name': 'detect_virt', 'RET': 'vm-other', 'ret': 0} MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0} MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0} @@ -330,7 +335,7 @@ class TestDsIdentify(DsIdentifyBase): break if not offset: raise ValueError("Expected to find 'blkid' mock, but did not.") - data['mocks'][offset]['out'] = d['out'].replace(dsibm.IBM_CONFIG_UUID, + data['mocks'][offset]['out'] = d['out'].replace(ds_ibm.IBM_CONFIG_UUID, "DEAD-BEEF") self._check_via_dict( data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE]) @@ -516,6 +521,20 @@ class TestDsIdentify(DsIdentifyBase): """Hetzner cloud is identified in sys_vendor.""" self._test_ds_found('Hetzner') + def test_smartos_bhyve(self): + """SmartOS cloud identified by SmartDC in dmi.""" + self._test_ds_found('SmartOS-bhyve') + + def test_smartos_lxbrand(self): + """SmartOS cloud identified on lxbrand container.""" + self._test_ds_found('SmartOS-lxbrand') + + def test_smartos_lxbrand_requires_socket(self): + """SmartOS cloud should not be identified if no socket file.""" + mycfg = copy.deepcopy(VALID_CFG['SmartOS-lxbrand']) + del mycfg['files'][ds_smartos.METADATA_SOCKFILE] + self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled") + class TestIsIBMProvisioning(DsIdentifyBase): """Test the is_ibm_provisioning method in ds-identify.""" @@ -777,7 +796,7 @@ VALID_CFG = { [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(), 'UUID': uuid4(), 'LABEL': 'cloudimg-bootfs'}, {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'config-2', - 'UUID': dsibm.IBM_CONFIG_UUID}, + 'UUID': ds_ibm.IBM_CONFIG_UUID}, {'DEVNAME': 'xvda2', 'TYPE': 'ext4', 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4(), 'UUID': uuid4()}, @@ -798,6 +817,32 @@ VALID_CFG = { }, ], }, + 'SmartOS-bhyve': { + 'ds': 'SmartOS', + 'mocks': [ + MOCK_VIRT_IS_VM_OTHER, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'ext4', + 'PARTUUID': '49ec635a-01'}, + {'DEVNAME': 'vda2', 'TYPE': 'swap', + 'LABEL': 'cloudimg-swap', 'PARTUUID': '49ec635a-02'}]), + }, + ], + 'files': {P_PRODUCT_NAME: 'SmartDC HVM\n'}, + }, + 'SmartOS-lxbrand': { + 'ds': 'SmartOS', + 'mocks': [ + MOCK_VIRT_IS_CONTAINER_OTHER, + {'name': 'uname', 'ret': 0, + 'out': ("Linux d43da87a-daca-60e8-e6d4-d2ed372662a3 4.3.0 " + "BrandZ virtual linux x86_64 GNU/Linux")}, + {'name': 'blkid', 'ret': 2, 'out': ''}, + ], + 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'}, + } + } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 435a5bcb..dc7ac3a7 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -261,7 +261,7 @@ read_virt() { is_container() { case "${DI_VIRT}" in - lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;; + container-other|lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;; *) return 1;; esac } @@ -990,12 +990,14 @@ dscheck_SmartOS() { # joyent cloud has two virt types: kvm and container # on kvm, product name on joyent public cloud shows 'SmartDC HVM' # on the container platform, uname's version has: BrandZ virtual linux + # for container, we also verify that the socketfile exists to protect + # against embedded containers (lxd running on brandz) local smartdc_kver="BrandZ virtual linux" + local metadata_sockfile="${PATH_ROOT}/native/.zonecontrol/metadata.sock" dmi_product_name_matches "SmartDC*" && return $DS_FOUND - if [ "${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] && - [ "${DI_VIRT}" = "container-other" ]; then - return ${DS_FOUND} - fi + [ 
"${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] && + [ -e "${metadata_sockfile}" ] && + return ${DS_FOUND} return ${DS_NOT_FOUND} } -- cgit v1.2.3 From 589b542bfb3b6630b931a506ca017635059cef1d Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 16 May 2018 08:16:10 -0600 Subject: tests: restructure SSH and initial connections The SSH function was retrying and waiting for SSH for over an hour when an SSH connection was failing to be established. This reduces the amount of retries and time between each retry to prevent tests from running for hours. Also restructures how waiting for the system works: the system will attempt to SSH up to the boot timeout time by catching SSH connection failures and retrying until the timeout is reached. If the limit is reached now an exception is thrown to abort the test. Drive by - this also fixes printing of the instance name when collecting the console log, rather than showing a Python object address. Fixes LP: #1758409 --- tests/cloud_tests/collect.py | 2 +- tests/cloud_tests/platforms/instances.py | 39 ++++++++++++++++++++++++-------- 2 files changed, 30 insertions(+), 11 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index 1ba72856..78263bf5 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -42,7 +42,7 @@ def collect_console(instance, base_dir): @param base_dir: directory to write console log to """ logfile = os.path.join(base_dir, 'console.log') - LOG.debug('getting console log for %s to %s', instance, logfile) + LOG.debug('getting console log for %s to %s', instance.name, logfile) try: data = instance.console_log() except NotImplementedError as e: diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py index cc439d29..95bc3b16 100644 --- a/tests/cloud_tests/platforms/instances.py +++ b/tests/cloud_tests/platforms/instances.py @@ -87,7 +87,12 @@ class Instance(TargetBase): self._ssh_client = None def _ssh_connect(self): - """Connect via SSH.""" + """Connect via SSH. + + Attempt to SSH to the client on the specific IP and port. If it + fails in some manner, then retry 2 more times for a total of 3 + attempts; sleeping a few seconds between attempts. 
+ """ if self._ssh_client: return self._ssh_client @@ -98,21 +103,22 @@ class Instance(TargetBase): client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file) - retries = 30 + retries = 3 while retries: try: client.connect(username=self.ssh_username, hostname=self.ssh_ip, port=self.ssh_port, - pkey=private_key, banner_timeout=30) + pkey=private_key) self._ssh_client = client return client except (ConnectionRefusedError, AuthenticationException, BadHostKeyException, ConnectionResetError, SSHException, OSError): retries -= 1 - time.sleep(10) + LOG.debug('Retrying ssh connection on connect failure') + time.sleep(3) - ssh_cmd = 'Failed ssh connection to %s@%s:%s after 300 seconds' % ( + ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % ( self.ssh_username, self.ssh_ip, self.ssh_port ) raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh') @@ -128,18 +134,31 @@ class Instance(TargetBase): return ' '.join(l for l in test.strip().splitlines() if not l.lstrip().startswith('#')) - time = self.config['boot_timeout'] + boot_timeout = self.config['boot_timeout'] tests = [self.config['system_ready_script']] if wait_for_cloud_init: tests.append(self.config['cloud_init_ready_script']) formatted_tests = ' && '.join(clean_test(t) for t in tests) cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && ' - 'exit 0; sleep 1; done; exit 1').format(time=time, + 'exit 0; sleep 1; done; exit 1').format(time=boot_timeout, test=formatted_tests) - if self.execute(cmd, rcs=(0, 1))[-1] != 0: - raise OSError('timeout: after {}s system not started'.format(time)) - + end_time = time.time() + boot_timeout + while True: + try: + return_code = self.execute( + cmd, rcs=(0, 1), description='wait for instance start' + )[-1] + if return_code == 0: + break + except util.InTargetExecuteError: + LOG.warning("failed to connect via SSH") + + if time.time() < end_time: + time.sleep(3) + else: + raise util.PlatformError('ssh', 'after %ss instance is not ' + 'reachable' % boot_timeout) # vi: ts=4 expandtab -- cgit v1.2.3 From 2dab7046f88e540836498b363c90ad46302a7cc4 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 17 May 2018 11:57:37 -0400 Subject: cloud_tests: help pylint pylint missed finding a typo in the lxd platform because it could not determine that the variable was being used was a string. The variable was set by loading a yaml file which pylint couldn't know that it would be a string. In these cases, we can be more explicit. --- tests/cloud_tests/platforms/lxd/instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tests') diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index 1c17c781..d396519f 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -208,7 +208,7 @@ def _has_proper_console_support(): if 'console' not in info.get('api_extensions', []): reason = "LXD server does not support console api extension" else: - dver = info.get('environment', {}).get('driver_version', "") + dver = str(info.get('environment', {}).get('driver_version', "")) if dver.startswith("2.") or dver.startswith("1."): reason = "LXD Driver version not 3.x+ (%s)" % dver else: -- cgit v1.2.3 From 30e730f7ca111487d243ba9f40c66df6d7a49953 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 17 May 2018 14:59:54 -0600 Subject: read_file_or_url: move to url_helper, fix bug in its FileResponse. 
The result of a read_file_or_url on a file and on a url would differ in behavior. str(UrlResponse) would return UrlResponse.contents.decode('utf-8') while str(FileResponse) would return str(FileResponse.contents) The difference being "b'foo'" versus "foo". As part of the general goal of cleaning util, move read_file_or_url into url_helper. --- cloudinit/cmd/main.py | 2 +- cloudinit/config/cc_phone_home.py | 7 +-- cloudinit/config/schema.py | 4 +- cloudinit/ec2_utils.py | 14 +++--- cloudinit/sources/DataSourceMAAS.py | 2 +- cloudinit/sources/helpers/azure.py | 5 ++- cloudinit/tests/test_url_helper.py | 28 +++++++++++- cloudinit/url_helper.py | 29 +++++++++++- cloudinit/user_data.py | 6 +-- cloudinit/util.py | 37 ++-------------- tests/unittests/test__init__.py | 8 ++-- .../unittests/test_datasource/test_azure_helper.py | 2 +- .../test_handler/test_handler_apt_conf_v1.py | 16 +++---- .../test_handler_apt_configure_sources_list_v1.py | 7 --- .../test_handler/test_handler_apt_source_v1.py | 27 +++++------- .../test_handler/test_handler_apt_source_v3.py | 27 +++++------- tests/unittests/test_handler/test_handler_ntp.py | 51 +++++++++------------- 17 files changed, 131 insertions(+), 141 deletions(-) (limited to 'tests') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 3f2dbb93..d6ba90f4 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None): data = None header = b'#cloud-config' try: - resp = util.read_file_or_url(**kwargs) + resp = url_helper.read_file_or_url(**kwargs) if resp.ok(): data = resp.contents if not resp.contents.startswith(header): diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 878069b7..3be0d1c1 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -41,6 +41,7 @@ keys to post. 
Available keys are: """ from cloudinit import templater +from cloudinit import url_helper from cloudinit import util from cloudinit.settings import PER_INSTANCE @@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args): } url = templater.render_string(url, url_params) try: - util.read_file_or_url(url, data=real_submit_keys, - retries=tries, sec_between=3, - ssl_details=util.fetch_ssl_details(cloud.paths)) + url_helper.read_file_or_url( + url, data=real_submit_keys, retries=tries, sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths)) except Exception: util.logexc(log, "Failed to post phone home data to %s in %s tries", url, tries) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 76826e05..3bad8e22 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -4,7 +4,7 @@ from __future__ import print_function from cloudinit import importer -from cloudinit.util import find_modules, read_file_or_url +from cloudinit.util import find_modules, load_file import argparse from collections import defaultdict @@ -139,7 +139,7 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): """ if not os.path.exists(config_path): raise RuntimeError('Configfile {0} does not exist'.format(config_path)) - content = read_file_or_url('file://{0}'.format(config_path)).contents + content = load_file(config_path, decode=False) if not content.startswith(CLOUD_CONFIG_HEADER): errors = ( ('header', 'File {0} needs to begin with "{1}"'.format( diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index dc3f0fc3..3b7b17f1 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest', # NOT_FOUND occurs) and just in that case returning an empty string. exception_cb = functools.partial(_skip_retry_on_codes, SKIP_USERDATA_CODES) - response = util.read_file_or_url(ud_url, - ssl_details=ssl_details, - timeout=timeout, - retries=retries, - exception_cb=exception_cb) + response = url_helper.read_file_or_url( + ud_url, ssl_details=ssl_details, timeout=timeout, + retries=retries, exception_cb=exception_cb) user_data = response.contents except url_helper.UrlError as e: if e.code not in SKIP_USERDATA_CODES: @@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest', ssl_details=None, timeout=5, retries=5, leaf_decoder=None): md_url = url_helper.combine_url(metadata_address, api_version, tree) - caller = functools.partial(util.read_file_or_url, - ssl_details=ssl_details, timeout=timeout, - retries=retries) + caller = functools.partial( + url_helper.read_file_or_url, ssl_details=ssl_details, + timeout=timeout, retries=retries) def mcaller(url): return caller(url).contents diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index aa56addb..bcb38544 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -198,7 +198,7 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, If version is None, then / will not be used. 
""" if read_file_or_url is None: - read_file_or_url = util.read_file_or_url + read_file_or_url = url_helper.read_file_or_url if seed_url.endswith("/"): seed_url = seed_url[:-1] diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 90c12df1..e5696b1f 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -14,6 +14,7 @@ from cloudinit import temp_utils from contextlib import contextmanager from xml.etree import ElementTree +from cloudinit import url_helper from cloudinit import util LOG = logging.getLogger(__name__) @@ -55,14 +56,14 @@ class AzureEndpointHttpClient(object): if secure: headers = self.headers.copy() headers.update(self.extra_secure_headers) - return util.read_file_or_url(url, headers=headers) + return url_helper.read_file_or_url(url, headers=headers) def post(self, url, data=None, extra_headers=None): headers = self.headers if extra_headers is not None: headers = self.headers.copy() headers.update(extra_headers) - return util.read_file_or_url(url, data=data, headers=headers) + return url_helper.read_file_or_url(url, data=data, headers=headers) class GoalState(object): diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index b778a3a7..113249d9 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -1,7 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.url_helper import oauth_headers +from cloudinit.url_helper import oauth_headers, read_file_or_url from cloudinit.tests.helpers import CiTestCase, mock, skipIf +from cloudinit import util + +import httpretty try: @@ -38,3 +41,26 @@ class TestOAuthHeaders(CiTestCase): 'url', 'consumer_key', 'token_key', 'token_secret', 'consumer_secret') self.assertEqual('url', return_value) + + +class TestReadFileOrUrl(CiTestCase): + def test_read_file_or_url_str_from_file(self): + """Test that str(result.contents) on file is text version of contents. + It should not be "b'data'", but just "'data'" """ + tmpf = self.tmp_path("myfile1") + data = b'This is my file content\n' + util.write_file(tmpf, data, omode="wb") + result = read_file_or_url("file://%s" % tmpf) + self.assertEqual(result.contents, data) + self.assertEqual(str(result), data.decode('utf-8')) + + @httpretty.activate + def test_read_file_or_url_str_from_url(self): + """Test that str(result.contents) on url is text version of contents. 
+ It should not be "b'data'", but just "'data'" """ + url = 'http://hostname/path' + data = b'This is my url content\n' + httpretty.register_uri(httpretty.GET, url, data) + result = read_file_or_url(url) + self.assertEqual(result.contents, data) + self.assertEqual(str(result), data.decode('utf-8')) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 1de07b1c..8067979e 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -15,6 +15,7 @@ import six import time from email.utils import parsedate +from errno import ENOENT from functools import partial from itertools import count from requests import exceptions @@ -80,6 +81,32 @@ def combine_url(base, *add_ons): return url +def read_file_or_url(url, timeout=5, retries=10, + headers=None, data=None, sec_between=1, ssl_details=None, + headers_cb=None, exception_cb=None): + url = url.lstrip() + if url.startswith("/"): + url = "file://%s" % url + if url.lower().startswith("file://"): + if data: + LOG.warning("Unable to post data to file resource %s", url) + file_path = url[len("file://"):] + try: + with open(file_path, "rb") as fp: + contents = fp.read() + except IOError as e: + code = e.errno + if e.errno == ENOENT: + code = NOT_FOUND + raise UrlError(cause=e, code=code, headers=None, url=url) + return FileResponse(file_path, contents=contents) + else: + return readurl(url, timeout=timeout, retries=retries, headers=headers, + headers_cb=headers_cb, data=data, + sec_between=sec_between, ssl_details=ssl_details, + exception_cb=exception_cb) + + # Made to have same accessors as UrlResponse so that the # read_file_or_url can return this or that object and the # 'user' of those objects will not need to know the difference. @@ -96,7 +123,7 @@ class StringResponse(object): return True def __str__(self): - return self.contents + return self.contents.decode('utf-8') class FileResponse(StringResponse): diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index cc55daf8..8f6aba1e 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -19,7 +19,7 @@ import six from cloudinit import handlers from cloudinit import log as logging -from cloudinit.url_helper import UrlError +from cloudinit.url_helper import read_file_or_url, UrlError from cloudinit import util LOG = logging.getLogger(__name__) @@ -224,8 +224,8 @@ class UserDataProcessor(object): content = util.load_file(include_once_fn) else: try: - resp = util.read_file_or_url(include_url, - ssl_details=self.ssl_details) + resp = read_file_or_url(include_url, + ssl_details=self.ssl_details) if include_once_on and resp.ok(): util.write_file(include_once_fn, resp.contents, mode=0o600) diff --git a/cloudinit/util.py b/cloudinit/util.py index fc30018c..edfedc7d 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -857,37 +857,6 @@ def fetch_ssl_details(paths=None): return ssl_details -def read_file_or_url(url, timeout=5, retries=10, - headers=None, data=None, sec_between=1, ssl_details=None, - headers_cb=None, exception_cb=None): - url = url.lstrip() - if url.startswith("/"): - url = "file://%s" % url - if url.lower().startswith("file://"): - if data: - LOG.warning("Unable to post data to file resource %s", url) - file_path = url[len("file://"):] - try: - contents = load_file(file_path, decode=False) - except IOError as e: - code = e.errno - if e.errno == ENOENT: - code = url_helper.NOT_FOUND - raise url_helper.UrlError(cause=e, code=code, headers=None, - url=url) - return url_helper.FileResponse(file_path, contents=contents) - else: - return 
url_helper.readurl(url, - timeout=timeout, - retries=retries, - headers=headers, - headers_cb=headers_cb, - data=data, - sec_between=sec_between, - ssl_details=ssl_details, - exception_cb=exception_cb) - - def load_yaml(blob, default=None, allowed=(dict,)): loaded = default blob = decode_binary(blob) @@ -925,12 +894,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): ud_url = "%s%s%s" % (base, "user-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) - md_resp = read_file_or_url(md_url, timeout, retries, file_retries) + md_resp = url_helper.read_file_or_url(md_url, timeout, retries, + file_retries) md = None if md_resp.ok(): md = load_yaml(decode_binary(md_resp.contents), default={}) - ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries) + ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries, + file_retries) ud = None if ud_resp.ok(): ud = ud_resp.contents diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index f1ab02e9..739bbebf 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -182,7 +182,7 @@ class TestCmdlineUrl(CiTestCase): self.assertEqual( ('url', 'http://example.com'), main.parse_cmdline_url(cmdline)) - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') def test_invalid_content(self, m_read): key = "cloud-config-url" url = 'http://example.com/foo' @@ -196,7 +196,7 @@ class TestCmdlineUrl(CiTestCase): self.assertIn(url, msg) self.assertFalse(os.path.exists(fpath)) - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') def test_valid_content(self, m_read): url = "http://example.com/foo" payload = b"#cloud-config\nmydata: foo\nbar: wark\n" @@ -210,7 +210,7 @@ class TestCmdlineUrl(CiTestCase): self.assertEqual(logging.INFO, lvl) self.assertIn(url, msg) - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') def test_no_key_found(self, m_read): cmdline = "ro mykey=http://example.com/foo root=foo" fpath = self.tmp_path("ccpath") @@ -221,7 +221,7 @@ class TestCmdlineUrl(CiTestCase): self.assertFalse(os.path.exists(fpath)) self.assertEqual(logging.DEBUG, lvl) - @mock.patch('cloudinit.cmd.main.util.read_file_or_url') + @mock.patch('cloudinit.cmd.main.url_helper.read_file_or_url') def test_exception_warns(self, m_read): url = "http://example.com/foo" cmdline = "ro cloud-config-url=%s root=LABEL=bar" % url diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index b42b073f..af9d3e1a 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -195,7 +195,7 @@ class TestAzureEndpointHttpClient(CiTestCase): self.addCleanup(patches.close) self.read_file_or_url = patches.enter_context( - mock.patch.object(azure_helper.util, 'read_file_or_url')) + mock.patch.object(azure_helper.url_helper, 'read_file_or_url')) def test_non_secure_get(self): client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/test_handler/test_handler_apt_conf_v1.py index 83f962a9..6a4b03ee 100644 --- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_conf_v1.py @@ -12,10 +12,6 @@ import shutil import tempfile -def 
load_tfile_or_url(*args, **kwargs): - return(util.decode_binary(util.read_file_or_url(*args, **kwargs).contents)) - - class TestAptProxyConfig(TestCase): def setUp(self): super(TestAptProxyConfig, self).setUp() @@ -36,7 +32,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = load_tfile_or_url(self.pfile) + contents = util.load_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_http_proxy_written(self): @@ -46,7 +42,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = load_tfile_or_url(self.pfile) + contents = util.load_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_all_proxy_written(self): @@ -64,7 +60,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = load_tfile_or_url(self.pfile) + contents = util.load_file(self.pfile) for ptype, pval in values.items(): self.assertTrue(self._search_apt_config(contents, ptype, pval)) @@ -80,7 +76,7 @@ class TestAptProxyConfig(TestCase): cc_apt_configure.apply_apt_config({'proxy': "foo"}, self.pfile, self.cfile) self.assertTrue(os.path.isfile(self.pfile)) - contents = load_tfile_or_url(self.pfile) + contents = util.load_file(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "foo")) def test_config_written(self): @@ -92,14 +88,14 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.cfile)) self.assertFalse(os.path.isfile(self.pfile)) - self.assertEqual(load_tfile_or_url(self.cfile), payload) + self.assertEqual(util.load_file(self.cfile), payload) def test_config_replaced(self): util.write_file(self.pfile, "content doesnt matter") cc_apt_configure.apply_apt_config({'conf': "foo"}, self.pfile, self.cfile) self.assertTrue(os.path.isfile(self.cfile)) - self.assertEqual(load_tfile_or_url(self.cfile), "foo") + self.assertEqual(util.load_file(self.cfile), "foo") def test_config_deleted(self): # if no 'conf' is provided, delete any previously written file diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py index d2b96f0b..23bd6e10 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py @@ -64,13 +64,6 @@ deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted """) -def load_tfile_or_url(*args, **kwargs): - """load_tfile_or_url - load file and return content after decoding - """ - return util.decode_binary(util.read_file_or_url(*args, **kwargs).contents) - - class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): """TestAptSourceConfigSourceList Main Class to test sources list rendering diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py index 46ca4ce4..a3132fbd 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v1.py @@ -39,13 +39,6 @@ S0ORP6HXET3+jC8BMG4tBWCTK/XEZw== ADD_APT_REPO_MATCH = r"^[\w-]+:\w" -def load_tfile_or_url(*args, **kwargs): - """load_tfile_or_url - load file and return content after decoding - """ - return 
util.decode_binary(util.read_file_or_url(*args, **kwargs).contents) - - class FakeDistro(object): """Fake Distro helper object""" def update_package_sources(self): @@ -125,7 +118,7 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile_or_url(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://archive.ubuntu.com/ubuntu", "karmic-backports", @@ -157,13 +150,13 @@ class TestAptSourceConfig(TestCase): self.apt_src_basic(self.aptlistfile, cfg) # extra verify on two extra files of this test - contents = load_tfile_or_url(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://archive.ubuntu.com/ubuntu", "precise-backports", "main universe multiverse restricted"), contents, flags=re.IGNORECASE)) - contents = load_tfile_or_url(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://archive.ubuntu.com/ubuntu", "lucid-backports", @@ -220,7 +213,7 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile_or_url(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "multiverse"), @@ -241,12 +234,12 @@ class TestAptSourceConfig(TestCase): # extra verify on two extra files of this test params = self._get_default_params() - contents = load_tfile_or_url(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "main"), contents, flags=re.IGNORECASE)) - contents = load_tfile_or_url(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "universe"), @@ -296,7 +289,7 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile_or_url(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' @@ -336,14 +329,14 @@ class TestAptSourceConfig(TestCase): 'filename': self.aptlistfile3} self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3) - contents = load_tfile_or_url(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' 'cloud-init-test/ubuntu'), "xenial", "universe"), contents, flags=re.IGNORECASE)) - contents = load_tfile_or_url(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' @@ -375,7 +368,7 @@ class TestAptSourceConfig(TestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile_or_url(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index e486862d..7a64c230 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -49,13 +49,6 @@ ADD_APT_REPO_MATCH = r"^[\w-]+:\w" TARGET = None -def load_tfile(*args, **kwargs): - """load_tfile_or_url - load file and return content after decoding - """ - return 
util.decode_binary(util.read_file_or_url(*args, **kwargs).contents) - - class TestAptSourceConfig(t_help.FilesystemMockingTestCase): """TestAptSourceConfig Main Class to test apt configs @@ -119,7 +112,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://test.ubuntu.com/ubuntu", "karmic-backports", @@ -151,13 +144,13 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self._apt_src_basic(self.aptlistfile, cfg) # extra verify on two extra files of this test - contents = load_tfile(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://test.ubuntu.com/ubuntu", "precise-backports", "main universe multiverse restricted"), contents, flags=re.IGNORECASE)) - contents = load_tfile(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", "http://test.ubuntu.com/ubuntu", "lucid-backports", @@ -174,7 +167,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "multiverse"), @@ -201,12 +194,12 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): # extra verify on two extra files of this test params = self._get_default_params() - contents = load_tfile(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "main"), contents, flags=re.IGNORECASE)) - contents = load_tfile(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", params['MIRROR'], params['RELEASE'], "universe"), @@ -240,7 +233,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertTrue(os.path.isfile(filename)) - contents = load_tfile(filename) + contents = util.load_file(filename) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' @@ -277,14 +270,14 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): 'keyid': "03683F77"}} self._apt_src_keyid(self.aptlistfile, cfg, 3) - contents = load_tfile(self.aptlistfile2) + contents = util.load_file(self.aptlistfile2) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' 'cloud-init-test/ubuntu'), "xenial", "universe"), contents, flags=re.IGNORECASE)) - contents = load_tfile(self.aptlistfile3) + contents = util.load_file(self.aptlistfile3) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' @@ -310,7 +303,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): self.assertTrue(os.path.isfile(self.aptlistfile)) - contents = load_tfile(self.aptlistfile) + contents = util.load_file(self.aptlistfile) self.assertTrue(re.search(r"%s %s %s %s\n" % ("deb", ('http://ppa.launchpad.net/smoser/' diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 6da4564e..6fe3659d 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -155,9 +155,9 @@ class TestNtp(FilesystemMockingTestCase): path=confpath, template_fn=template_fn, 
template=None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( - "servers []\npools ['10.0.0.1', '10.0.0.2']\n", content.decode()) + "servers []\npools ['10.0.0.1', '10.0.0.2']\n", + util.load_file(confpath)) def test_write_ntp_config_template_defaults_pools_w_empty_lists(self): """write_ntp_config_template defaults pools servers upon empty config. @@ -176,10 +176,9 @@ class TestNtp(FilesystemMockingTestCase): path=confpath, template_fn=template_fn, template=None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers []\npools {0}\n".format(pools), - content.decode()) + util.load_file(confpath)) def test_defaults_pools_empty_lists_sles(self): """write_ntp_config_template defaults opensuse pools upon empty config. @@ -196,11 +195,11 @@ class TestNtp(FilesystemMockingTestCase): path=confpath, template_fn=template_fn, template=None) - content = util.read_file_or_url('file://' + confpath).contents for pool in default_pools: self.assertIn('opensuse', pool) self.assertEqual( - "servers []\npools {0}\n".format(default_pools), content.decode()) + "servers []\npools {0}\n".format(default_pools), + util.load_file(confpath)) self.assertIn( "Adding distro default ntp pool servers: {0}".format( ",".join(default_pools)), @@ -217,10 +216,9 @@ class TestNtp(FilesystemMockingTestCase): path=confpath, template_fn=template_fn, template=None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)), - content.decode()) + util.load_file(confpath)) def test_distro_ntp_client_configs(self): """Test we have updated ntp client configs on different distros""" @@ -267,17 +265,17 @@ class TestNtp(FilesystemMockingTestCase): cc_ntp.write_ntp_config_template(distro, servers=servers, pools=pools, path=confpath, template_fn=template_fn) - content = util.read_file_or_url('file://' + confpath).contents + content = util.load_file(confpath) if client in ['ntp', 'chrony']: expected_servers = '\n'.join([ 'server {0} iburst'.format(srv) for srv in servers]) print('distro=%s client=%s' % (distro, client)) - self.assertIn(expected_servers, content.decode('utf-8'), + self.assertIn(expected_servers, content, ('failed to render {0} conf' ' for distro:{1}'.format(client, distro))) expected_pools = '\n'.join([ 'pool {0} iburst'.format(pool) for pool in pools]) - self.assertIn(expected_pools, content.decode('utf-8'), + self.assertIn(expected_pools, content, ('failed to render {0} conf' ' for distro:{1}'.format(client, distro))) elif client == 'systemd-timesyncd': @@ -286,7 +284,7 @@ class TestNtp(FilesystemMockingTestCase): "# See timesyncd.conf(5) for details.\n\n" + "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools))) - self.assertEqual(expected_content, content.decode()) + self.assertEqual(expected_content, content) def test_no_ntpcfg_does_nothing(self): """When no ntp section is defined handler logs a warning and noops.""" @@ -308,10 +306,10 @@ class TestNtp(FilesystemMockingTestCase): confpath = ntpconfig['confpath'] m_select.return_value = ntpconfig cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, []) - content = util.read_file_or_url('file://' + confpath).contents pools = cc_ntp.generate_server_names(mycloud.distro.name) self.assertEqual( - "servers []\npools {0}\n".format(pools), content.decode()) + "servers []\npools {0}\n".format(pools), + util.load_file(confpath)) self.assertNotIn('Invalid config:', self.logs.getvalue()) @skipUnlessJsonSchema() @@ 
-333,9 +331,8 @@ class TestNtp(FilesystemMockingTestCase): "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" "ntp.servers.1: None is not of type 'string'", self.logs.getvalue()) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual("servers ['valid', None]\npools [123]\n", - content.decode()) + util.load_file(confpath)) @skipUnlessJsonSchema() @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') @@ -357,9 +354,8 @@ class TestNtp(FilesystemMockingTestCase): "Invalid config:\nntp.pools: 123 is not of type 'array'\n" "ntp.servers: 'non-array' is not of type 'array'", self.logs.getvalue()) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual("servers non-array\npools 123\n", - content.decode()) + util.load_file(confpath)) @skipUnlessJsonSchema() @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') @@ -381,10 +377,9 @@ class TestNtp(FilesystemMockingTestCase): "Invalid config:\nntp: Additional properties are not allowed " "('invalidkey' was unexpected)", self.logs.getvalue()) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers []\npools ['0.mycompany.pool.ntp.org']\n", - content.decode()) + util.load_file(confpath)) @skipUnlessJsonSchema() @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') @@ -407,10 +402,10 @@ class TestNtp(FilesystemMockingTestCase): " has non-unique elements\nntp.servers: " "['10.0.0.1', '10.0.0.1'] has non-unique elements", self.logs.getvalue()) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers ['10.0.0.1', '10.0.0.1']\n" - "pools ['0.mypool.org', '0.mypool.org']\n", content.decode()) + "pools ['0.mypool.org', '0.mypool.org']\n", + util.load_file(confpath)) @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') def test_ntp_handler_timesyncd(self, m_select): @@ -426,10 +421,9 @@ class TestNtp(FilesystemMockingTestCase): confpath = ntpconfig['confpath'] m_select.return_value = ntpconfig cc_ntp.handle('cc_ntp', cfg, mycloud, None, []) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", - content.decode()) + util.load_file(confpath)) @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') def test_ntp_handler_enabled_false(self, m_select): @@ -466,10 +460,9 @@ class TestNtp(FilesystemMockingTestCase): m_util.subp.assert_called_with( ['systemctl', 'reload-or-restart', service_name], capture=True) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers []\npools {0}\n".format(pools), - content.decode()) + util.load_file(confpath)) def test_opensuse_picks_chrony(self): """Test opensuse picks chrony or ntp on certain distro versions""" @@ -638,10 +631,9 @@ class TestNtp(FilesystemMockingTestCase): mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' with mock.patch(mock_path, self.new_root): cc_ntp.handle('notimportant', cfg, mycloud, None, None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "servers []\npools ['mypool.org']\n%s" % custom, - content.decode()) + util.load_file(confpath)) @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation') @mock.patch('cloudinit.config.cc_ntp.reload_ntp') @@ -675,10 +667,9 @@ class TestNtp(FilesystemMockingTestCase): with mock.patch(mock_path, self.new_root): cc_ntp.handle('notimportant', {'ntp': cfg}, mycloud, None, None) - content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( 
"servers []\npools ['mypool.org']\n%s" % custom, - content.decode()) + util.load_file(confpath)) m_schema.assert_called_with(expected_merged_cfg) -- cgit v1.2.3 From 327af4a7e64a7e52f2bab7c6d80b79a2474d93eb Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 22 May 2018 09:58:07 -0400 Subject: tests: enable Ubuntu Cosmic in integration tests --- tests/cloud_tests/releases.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'tests') diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index c7dcbe83..defae02b 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -129,6 +129,22 @@ features: releases: # UBUNTU ================================================================= + cosmic: + # EOL: Jul 2019 + default: + enabled: true + release: cosmic + version: 18.10 + os: ubuntu + feature_groups: + - base + - debian_base + - ubuntu_specific + lxd: + sstreams_server: https://cloud-images.ubuntu.com/daily + alias: cosmic + setup_overrides: null + override_templates: false bionic: # EOL: Apr 2023 default: -- cgit v1.2.3 From b4ae0e1fb8a48a83ea325cf032eb1acb196ee31c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 22 May 2018 10:07:59 -0400 Subject: ds-identify: ensure that we have certain tokens in PATH. SuSE builds were not getting a PATH set in generator's environment. This may seem like mis-configuration on the system, but caused ds-identify to fail to find blkid (or any other program). The change here just ensures that we get /sbin /usr/sbin /bin /usr/bin into the PATH when main is run. LP: #1771382 --- tests/unittests/test_ds_identify.py | 23 ++++++++++++++++++++++- tools/ds-identify | 11 +++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 7f12be59..64d9f9f8 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -175,7 +175,9 @@ class DsIdentifyBase(CiTestCase): def _call_via_dict(self, data, rootd=None, **kwargs): # return output of self.call with a dict input like VALID_CFG[item] xwargs = {'rootd': rootd} - for k in ('mocks', 'args', 'policy_dmi', 'policy_no_dmi', 'files'): + passthrough = ('mocks', 'func', 'args', 'policy_dmi', + 'policy_no_dmi', 'files') + for k in passthrough: if k in data: xwargs[k] = data[k] if k in kwargs: @@ -535,6 +537,25 @@ class TestDsIdentify(DsIdentifyBase): del mycfg['files'][ds_smartos.METADATA_SOCKFILE] self._check_via_dict(mycfg, rc=RC_NOT_FOUND, policy_dmi="disabled") + def test_path_env_gets_set_from_main(self): + """PATH environment should always have some tokens when main is run. 
+ + We explicitly call main as we want to ensure it updates PATH.""" + cust = copy.deepcopy(VALID_CFG['NoCloud']) + rootd = self.tmp_dir() + mpp = 'main-printpath' + pre = "MYPATH=" + cust['files'][mpp] = ( + 'PATH="/mycust/path"; main; r=$?; echo ' + pre + '$PATH; exit $r;') + ret = self._check_via_dict( + cust, RC_FOUND, + func=".", args=[os.path.join(rootd, mpp)], rootd=rootd) + line = [l for l in ret.stdout.splitlines() if l.startswith(pre)][0] + toks = line.replace(pre, "").split(":") + expected = ["/sbin", "/bin", "/usr/sbin", "/usr/bin", "/mycust/path"] + self.assertEqual(expected, [p for p in expected if p in toks], + "path did not have expected tokens") + class TestIsIBMProvisioning(DsIdentifyBase): """Test the is_ibm_provisioning method in ds-identify.""" diff --git a/tools/ds-identify b/tools/ds-identify index dc7ac3a7..ce0477a5 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -187,6 +187,16 @@ block_dev_with_label() { return 0 } +ensure_sane_path() { + local t + for t in /sbin /usr/sbin /bin /usr/bin; do + case ":$PATH:" in + *:$t:*|*:$t/:*) continue;; + esac + PATH="${PATH:+${PATH}:}$t" + done +} + read_fs_info() { cached "${DI_BLKID_OUTPUT}" && return 0 # do not rely on links in /dev/disk which might not be present yet. @@ -1464,6 +1474,7 @@ _main() { main() { local ret="" + ensure_sane_path [ -d "$PATH_RUN_CI" ] || mkdir -p "$PATH_RUN_CI" if [ "${1:+$1}" != "--force" ] && [ -f "$PATH_RUN_CI_CFG" ] && [ -f "$PATH_RUN_DI_RESULT" ]; then -- cgit v1.2.3 From 529d48f69d3784b2314397f5eab9d750ab03cf6a Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Tue, 22 May 2018 10:55:04 -0400 Subject: cc_mounts: Do not add devices to fstab that are already present. Do not add new entries to /etc/fstab for devices that already have an existing fstab entry. 
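In outline, the fix works by pre-parsing the existing /etc/fstab into a map of device name to fstab line, dropping lines cloud-init itself wrote earlier (they carry the comment=cloudconfig marker and get regenerated); a configured mount whose sanitized device name is already in the map is then skipped. A minimal standalone sketch of that pre-pass, with a hypothetical helper name rather than the patch's inline loop:

    import re
    from string import whitespace

    WS = re.compile('[%s]+' % whitespace)
    MNT_COMMENT = 'comment=cloudconfig'

    def read_fstab_devs(fstab_text):
        """Map each device to its existing fstab line, dropping entries
        cloud-init wrote on an earlier boot (tagged with MNT_COMMENT)."""
        fstab_devs = {}
        for line in fstab_text.splitlines():
            if MNT_COMMENT in line:
                continue  # stale cloud-init entry; it will be regenerated
            toks = WS.split(line.strip())
            if toks and toks[0]:
                fstab_devs[toks[0]] = line
        return fstab_devs

    # A hand-written entry for the same device is then honored as-is:
    devs = read_fstab_devs('/dev/sdb1 swap swap defaults 0 0\n')
    assert '/dev/sdb1' in devs
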
Resolves: rhbz#1542578 --- cloudinit/config/cc_mounts.py | 53 +++++++---- cloudinit/tests/helpers.py | 4 +- .../unittests/test_handler/test_handler_mounts.py | 104 ++++++++++++++++++++- 3 files changed, 136 insertions(+), 25 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index b3b25c3b..eca6ea3f 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) FSTAB_PATH = "/etc/fstab" +MNT_COMMENT = "comment=cloudconfig" LOG = logging.getLogger(__name__) @@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args): LOG.debug("mounts configuration is %s", cfgmnt) + fstab_lines = [] + fstab_devs = {} + fstab_removed = [] + + for line in util.load_file(FSTAB_PATH).splitlines(): + if MNT_COMMENT in line: + fstab_removed.append(line) + continue + + try: + toks = WS.split(line) + except Exception: + pass + fstab_devs[toks[0]] = line + fstab_lines.append(line) + for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): @@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args): start = str(cfgmnt[i][0]) sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + if sanitized != start: + log.debug("changed %s => %s" % (start, sanitized)) + if sanitized is None: log.debug("Ignoring nonexistent named mount %s", start) continue + elif sanitized in fstab_devs: + log.info("Device %s already defined in fstab: %s", + sanitized, fstab_devs[sanitized]) + continue - if sanitized != start: - log.debug("changed %s => %s" % (start, sanitized)) cfgmnt[i][0] = sanitized # in case the user did not quote a field (likely fs-freq, fs_passno) @@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args): for defmnt in defmnts: start = defmnt[0] sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + if sanitized != start: + log.debug("changed default device %s => %s" % (start, sanitized)) + if sanitized is None: log.debug("Ignoring nonexistent default named mount %s", start) continue - if sanitized != start: - log.debug("changed default device %s => %s" % (start, sanitized)) + elif sanitized in fstab_devs: + log.debug("Device %s already defined in fstab: %s", + sanitized, fstab_devs[sanitized]) + continue + defmnt[0] = sanitized cfgmnt_has = False @@ -409,31 +437,18 @@ def handle(_name, cfg, cloud, log, _args): log.debug("No modifications to fstab needed") return - comment = "comment=cloudconfig" cc_lines = [] needswap = False dirs = [] for line in actlist: # write 'comment' in the fs_mntops, entry, claiming this - line[3] = "%s,%s" % (line[3], comment) + line[3] = "%s,%s" % (line[3], MNT_COMMENT) if line[2] == "swap": needswap = True if line[1].startswith("/"): dirs.append(line[1]) cc_lines.append('\t'.join(line)) - fstab_lines = [] - removed = [] - for line in util.load_file(FSTAB_PATH).splitlines(): - try: - toks = WS.split(line) - if toks[3].find(comment) != -1: - removed.append(line) - continue - except Exception: - pass - fstab_lines.append(line) - for d in dirs: try: util.ensure_dir(d) @@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Failed to make '%s' config-mount", d) sadds = [WS.sub(" ", n) for n in cc_lines] - sdrops = [WS.sub(" ", n) for n in removed] + sdrops = [WS.sub(" ", n) for n in fstab_removed] sops = (["- " + drop for drop in sdrops if drop not 
in sadds] + ["+ " + add for add in sadds if add not in sdrops]) diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 117a9cfe..07059fd4 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -111,12 +111,12 @@ class TestCase(unittest2.TestCase): super(TestCase, self).setUp() self.reset_global_state() - def add_patch(self, target, attr, **kwargs): + def add_patch(self, target, attr, *args, **kwargs): """Patches specified target object and sets it as attr on test instance also schedules cleanup""" if 'autospec' not in kwargs: kwargs['autospec'] = True - m = mock.patch(target, **kwargs) + m = mock.patch(target, *args, **kwargs) p = m.start() self.addCleanup(m.stop) setattr(self, attr, p) diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py index fe492d4b..8fea6c2a 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -1,8 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. import os.path -import shutil -import tempfile from cloudinit.config import cc_mounts @@ -18,8 +16,7 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): def setUp(self): super(TestSanitizeDevname, self).setUp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) + self.new_root = self.tmp_dir() self.patchOS(self.new_root) def _touch(self, path): @@ -134,4 +131,103 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): cc_mounts.sanitize_devname( 'ephemeral0.1', lambda x: disk_path, mock.Mock())) + +class TestFstabHandling(test_helpers.FilesystemMockingTestCase): + + swap_path = '/dev/sdb1' + + def setUp(self): + super(TestFstabHandling, self).setUp() + self.new_root = self.tmp_dir() + self.patchOS(self.new_root) + + self.fstab_path = os.path.join(self.new_root, 'etc/fstab') + self._makedirs('/etc') + + self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH', + 'mock_fstab_path', + self.fstab_path, + autospec=False) + + self.add_patch('cloudinit.config.cc_mounts._is_block_device', + 'mock_is_block_device', + return_value=True) + + self.add_patch('cloudinit.config.cc_mounts.util.subp', + 'mock_util_subp') + + self.mock_cloud = mock.Mock() + self.mock_log = mock.Mock() + self.mock_cloud.device_name_to_device = self.device_name_to_device + + def _makedirs(self, directory): + directory = os.path.join(self.new_root, directory.lstrip('/')) + if not os.path.exists(directory): + os.makedirs(directory) + + def device_name_to_device(self, path): + if path == 'swap': + return self.swap_path + else: + dev = None + + return dev + + def test_fstab_no_swap_device(self): + '''Ensure that cloud-init adds a discovered swap partition + to /etc/fstab.''' + + fstab_original_content = '' + fstab_expected_content = ( + '%s\tnone\tswap\tsw,comment=cloudconfig\t' + '0\t0\n' % (self.swap_path,) + ) + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + + def test_fstab_same_swap_device_already_configured(self): + '''Ensure that cloud-init will not add a swap device if the same + device already exists in /etc/fstab.''' + + fstab_original_content = '%s swap swap defaults 0 0\n' % ( + self.swap_path,) + fstab_expected_content = 
fstab_original_content + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + + def test_fstab_alternate_swap_device_already_configured(self): + '''Ensure that cloud-init will add a discovered swap device to + /etc/fstab even when there exists a swap definition on another + device.''' + + fstab_original_content = '/dev/sdc1 swap swap defaults 0 0\n' + fstab_expected_content = ( + fstab_original_content + + '%s\tnone\tswap\tsw,comment=cloudconfig\t' + '0\t0\n' % (self.swap_path,) + ) + + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + + cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) + + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + # vi: ts=4 expandtab -- cgit v1.2.3 From 5446c788160412189200c6cc688b14c9f9071943 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 22 May 2018 16:06:41 -0400 Subject: Update version.version_string to contain packaged version. This modifies version.version_string to support having the package build write the *packaged* version in with an easy replace. Then, when cloud-init reports its version it will include the full packaged version. Also modified here are upstream package build files to get that done. Note part of the trickery in packages/debian/rules.in was to avoid the 'basic' templater consuming the '$variable' variable names. LP: #1770712 --- cloudinit/tests/test_version.py | 31 +++++++++++++++++++++++++++++++ cloudinit/version.py | 4 ++++ packages/debian/rules.in | 2 ++ tests/unittests/test_version.py | 14 -------------- 4 files changed, 37 insertions(+), 14 deletions(-) create mode 100644 cloudinit/tests/test_version.py delete mode 100644 tests/unittests/test_version.py (limited to 'tests') diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py new file mode 100644 index 00000000..a96c2a47 --- /dev/null +++ b/cloudinit/tests/test_version.py @@ -0,0 +1,31 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.tests.helpers import CiTestCase +from cloudinit import version + +import mock + + +class TestExportsFeatures(CiTestCase): + def test_has_network_config_v1(self): + self.assertIn('NETWORK_CONFIG_V1', version.FEATURES) + + def test_has_network_config_v2(self): + self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) + + +class TestVersionString(CiTestCase): + @mock.patch("cloudinit.version._PACKAGED_VERSION", + "17.2-3-gb05b9972-0ubuntu1") + def test_package_version_respected(self): + """If _PACKAGED_VERSION is filled in, then it should be returned.""" + self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string()) + + @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@") + @mock.patch("cloudinit.version.__VERSION__", "17.2") + def test_package_version_skipped(self): + """If _PACKAGED_VERSION is not modified, then return __VERSION__.""" + self.assertEqual("17.2", version.version_string()) + + +# vi: ts=4 expandtab diff --git a/cloudinit/version.py b/cloudinit/version.py index ccd0f84e..ce3b8c1e 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -5,6 +5,7 @@ # This file is part of cloud-init. See LICENSE file for license information.
__VERSION__ = "18.2" +_PACKAGED_VERSION = '@@PACKAGED_VERSION@@' FEATURES = [ # supports network config version 1 @@ -15,6 +16,9 @@ FEATURES = [ def version_string(): + """Extract a version string from cloud-init.""" + if not _PACKAGED_VERSION.startswith('@@'): + return _PACKAGED_VERSION return __VERSION__ # vi: ts=4 expandtab diff --git a/packages/debian/rules.in b/packages/debian/rules.in index 4aa907e3..e542c7f1 100755 --- a/packages/debian/rules.in +++ b/packages/debian/rules.in @@ -3,6 +3,7 @@ INIT_SYSTEM ?= systemd export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM) PYVER ?= python${pyver} +DEB_VERSION := $(shell dpkg-parsechangelog --show-field=Version) %: dh $@ --with $(PYVER),systemd --buildsystem pybuild @@ -14,6 +15,7 @@ override_dh_install: cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh + flist=$$(find $(CURDIR)/debian/ -type f -name version.py) && sed -i 's,@@PACKAGED_VERSION@@,$(DEB_VERSION),' $${flist:-did-not-find-version-py-for-replacement} override_dh_auto_test: ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) diff --git a/tests/unittests/test_version.py b/tests/unittests/test_version.py deleted file mode 100644 index d012f69d..00000000 --- a/tests/unittests/test_version.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.tests.helpers import CiTestCase -from cloudinit import version - - -class TestExportsFeatures(CiTestCase): - def test_has_network_config_v1(self): - self.assertIn('NETWORK_CONFIG_V1', version.FEATURES) - - def test_has_network_config_v2(self): - self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) - -# vi: ts=4 expandtab -- cgit v1.2.3 From aa4eeb80839382117e1813e396dc53aa634fd7ba Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Wed, 23 May 2018 15:45:39 -0400 Subject: Azure: Ignore NTFS mount errors when checking ephemeral drive The Azure data source provides a method to check whether a NTFS partition on the ephemeral disk is safe for reformatting to ext4. The method checks to see if there are customer data files on the disk. However, mounting the partition fails on systems that do not have the capability of mounting NTFS. Note that in this case, it is also very unlikely that the NTFS partition would have been used by the system (since it can't mount it). The only case would be where an update to the system removed the capability to mount NTFS, the likelihood of which is also very small. This change allows the reformatting of the ephemeral disk to ext4 on systems where mounting NTFS is not supported. 
--- cloudinit/sources/DataSourceAzure.py | 63 ++++++++++++---- cloudinit/util.py | 5 +- tests/unittests/test_datasource/test_azure.py | 105 +++++++++++++++++++++----- 3 files changed, 138 insertions(+), 35 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 1b03d460..7007d9ea 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -208,6 +208,7 @@ BUILTIN_CLOUD_CONFIG = { } DS_CFG_PATH = ['datasource', DS_NAME] +DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs' DEF_EPHEMERAL_LABEL = 'Temporary Storage' # The redacted password fails to meet password complexity requirements @@ -394,14 +395,9 @@ class DataSourceAzure(sources.DataSource): if found == ddir: LOG.debug("using files cached in %s", ddir) - # azure / hyper-v provides random data here - # TODO. find the seed on FreeBSD platform - # now update ds_cfg to reflect contents pass in config - if not util.is_FreeBSD(): - seed = util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) - if seed: - self.metadata['random_seed'] = seed + seed = _get_random_seed() + if seed: + self.metadata['random_seed'] = seed user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) @@ -539,7 +535,9 @@ class DataSourceAzure(sources.DataSource): return fabric_data def activate(self, cfg, is_new_instance): - address_ephemeral_resize(is_new_instance=is_new_instance) + address_ephemeral_resize(is_new_instance=is_new_instance, + preserve_ntfs=self.ds_cfg.get( + DS_CFG_KEY_PRESERVE_NTFS, False)) return @property @@ -583,17 +581,29 @@ def _has_ntfs_filesystem(devpath): return os.path.realpath(devpath) in ntfs_devices -def can_dev_be_reformatted(devpath): - """Determine if block device devpath is newly formatted ephemeral. +def can_dev_be_reformatted(devpath, preserve_ntfs): + """Determine if the ephemeral drive at devpath should be reformatted. - A newly formatted disk will: + A fresh ephemeral disk is formatted by Azure and will: a.) have a partition table (dos or gpt) b.) have 1 partition that is ntfs formatted, or have 2 partitions with the second partition ntfs formatted. (larger instances with >2TB ephemeral disk have gpt, and will have a microsoft reserved partition as part 1. LP: #1686514) c.) the ntfs partition will have no files other than possibly - 'dataloss_warning_readme.txt'""" + 'dataloss_warning_readme.txt' + + User can indicate that NTFS should never be destroyed by setting + DS_CFG_KEY_PRESERVE_NTFS in dscfg. + If data is found on NTFS, user is warned to set DS_CFG_KEY_PRESERVE_NTFS + to make sure cloud-init does not accidentally wipe their data. 
+ If cloud-init cannot mount the disk to check for data, destruction + will be allowed, unless the dscfg key is set.""" + if preserve_ntfs: + msg = ('config says to never destroy NTFS (%s.%s), skipping checks' % + (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS)) + return False, msg + if not os.path.exists(devpath): return False, 'device %s does not exist' % devpath @@ -626,18 +636,27 @@ def can_dev_be_reformatted(devpath): bmsg = ('partition %s (%s) on device %s was ntfs formatted' % (cand_part, cand_path, devpath)) try: - file_count = util.mount_cb(cand_path, count_files) + file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", + update_env_for_mount={'LANG': 'C'}) except util.MountFailedError as e: + if "mount: unknown filesystem type 'ntfs'" in str(e): + return True, (bmsg + ' but this system cannot mount NTFS,' + ' assuming there are no important files.' + ' Formatting allowed.') return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) if file_count != 0: + LOG.warning("it looks like you're using NTFS on the ephemeral disk, " + 'to ensure that filesystem does not get wiped, set ' + '%s.%s in config', '.'.join(DS_CFG_PATH), + DS_CFG_KEY_PRESERVE_NTFS) return False, bmsg + ' but had %d files on it.' % file_count return True, bmsg + ' and had no important files. Safe for reformatting.' def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, - is_new_instance=False): + is_new_instance=False, preserve_ntfs=False): # wait for ephemeral disk to come up naplen = .2 missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, @@ -653,7 +672,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, if is_new_instance: result, msg = (True, "First instance boot.") else: - result, msg = can_dev_be_reformatted(devpath) + result, msg = can_dev_be_reformatted(devpath, preserve_ntfs) LOG.debug("reformattable=%s: %s", result, msg) if not result: @@ -967,6 +986,18 @@ def _check_freebsd_cdrom(cdrom_dev): return False +def _get_random_seed(): + """Return content random seed file if available, otherwise, + return None.""" + # azure / hyper-v provides random data here + # TODO. find the seed on FreeBSD platform + # now update ds_cfg to reflect contents pass in config + if util.is_FreeBSD(): + return None + return util.load_file("/sys/firmware/acpi/tables/OEM0", + quiet=True, decode=False) + + def list_possible_azure_ds_devs(): devlist = [] if util.is_FreeBSD(): diff --git a/cloudinit/util.py b/cloudinit/util.py index edfedc7d..653ed6ea 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1581,7 +1581,8 @@ def mounts(): return mounted -def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): +def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, + update_env_for_mount=None): """ Mount the device, call method 'callback' passing the directory in which it was mounted, then unmount. 
Return whatever 'callback' @@ -1643,7 +1644,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True): mountcmd.extend(['-t', mtype]) mountcmd.append(device) mountcmd.append(tmpd) - subp(mountcmd) + subp(mountcmd, update_env=update_env_for_mount) umount = tmpd # This forces it to be unmounted (when set) mountpoint = tmpd break diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 26e8d7d3..e82716eb 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,10 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit import helpers -from cloudinit.util import b64e, decode_binary, load_file, write_file from cloudinit.sources import DataSourceAzure as dsaz -from cloudinit.util import find_freebsd_part -from cloudinit.util import get_path_dev_freebsd +from cloudinit.util import (b64e, decode_binary, load_file, write_file, + find_freebsd_part, get_path_dev_freebsd, + MountFailedError) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import (CiTestCase, TestCase, populate_dir, mock, ExitStack, PY26, SkipTest) @@ -95,6 +95,8 @@ class TestAzureDataSource(CiTestCase): self.patches = ExitStack() self.addCleanup(self.patches.close) + self.patches.enter_context(mock.patch.object(dsaz, '_get_random_seed')) + super(TestAzureDataSource, self).setUp() def apply_patches(self, patches): @@ -335,6 +337,18 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertTrue(ret) self.assertEqual(data['agent_invoked'], '_COMMAND') + def test_sys_cfg_set_never_destroy_ntfs(self): + sys_cfg = {'datasource': {'Azure': { + 'never_destroy_ntfs': 'user-supplied-value'}}} + data = {'ovfcontent': construct_valid_ovf_env(data={}), + 'sys_cfg': sys_cfg} + + dsrc = self._get_ds(data) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) + self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS), + 'user-supplied-value') + def test_username_used(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} @@ -676,6 +690,8 @@ class TestAzureBounce(CiTestCase): mock.MagicMock(return_value={}))) self.patches.enter_context( mock.patch.object(dsaz.util, 'which', lambda x: True)) + self.patches.enter_context( + mock.patch.object(dsaz, '_get_random_seed')) def _dmi_mocks(key): if key == 'system-uuid': @@ -957,7 +973,9 @@ class TestCanDevBeReformatted(CiTestCase): # return sorted by partition number return sorted(ret, key=lambda d: d[0]) - def mount_cb(device, callback): + def mount_cb(device, callback, mtype, update_env_for_mount): + self.assertEqual('ntfs', mtype) + self.assertEqual('C', update_env_for_mount.get('LANG')) p = self.tmp_dir() for f in bypath.get(device).get('files', []): write_file(os.path.join(p, f), content=f) @@ -988,14 +1006,16 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2}, '/dev/sda3': {'num': 3}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("3 or more", msg.lower()) def test_no_partitions_is_false(self): """A disk with no partitions can not be formatted.""" self.patchup({'/dev/sda': {}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("not partitioned", msg.lower()) @@ -1007,7 
+1027,8 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1}, '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) @@ -1020,7 +1041,8 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': ['secret.txt']}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("files on it", msg.lower()) @@ -1032,7 +1054,8 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1}, '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertTrue(value) self.assertIn("safe for", msg.lower()) @@ -1043,7 +1066,8 @@ class TestCanDevBeReformatted(CiTestCase): 'partitions': { '/dev/sda1': {'num': 1, 'fs': 'zfs'}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertFalse(value) self.assertIn("not ntfs", msg.lower()) @@ -1055,9 +1079,14 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': ['file1.txt', 'file2.exe']}, }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") - self.assertFalse(value) - self.assertIn("files on it", msg.lower()) + with mock.patch.object(dsaz.LOG, 'warning') as warning: + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) + wmsg = warning.call_args[0][0] + self.assertIn("looks like you're using NTFS on the ephemeral disk", + wmsg) + self.assertFalse(value) + self.assertIn("files on it", msg.lower()) def test_one_partition_ntfs_empty_is_true(self): """1 mountable ntfs partition and no files can be formatted.""" @@ -1066,7 +1095,8 @@ class TestCanDevBeReformatted(CiTestCase): 'partitions': { '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertTrue(value) self.assertIn("safe for", msg.lower()) @@ -1078,7 +1108,8 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': ['dataloss_warning_readme.txt']} }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda") + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=False) self.assertTrue(value) self.assertIn("safe for", msg.lower()) @@ -1093,7 +1124,8 @@ class TestCanDevBeReformatted(CiTestCase): 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], 'realpath': '/dev/sdb1'} }}}) - value, msg = dsaz.can_dev_be_reformatted(epath) + value, msg = dsaz.can_dev_be_reformatted(epath, + preserve_ntfs=False) self.assertTrue(value) self.assertIn("safe for", msg.lower()) @@ -1112,10 +1144,49 @@ class TestCanDevBeReformatted(CiTestCase): epath + '-part3': {'num': 3, 'fs': 'ext', 'realpath': '/dev/sdb3'} }}}) - value, msg = dsaz.can_dev_be_reformatted(epath) + value, msg = dsaz.can_dev_be_reformatted(epath, + preserve_ntfs=False) self.assertFalse(value) self.assertIn("3 or more", msg.lower()) + def test_ntfs_mount_errors_true(self): + """can_dev_be_reformatted does not fail if NTFS is unknown fstype.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 
1, 'fs': 'ntfs', 'files': []} + }}}) + + err = ("Unexpected error while running command.\n", + "Command: ['mount', '-o', 'ro,sync', '-t', 'auto', ", + "'/dev/sda1', '/fake-tmp/dir']\n" + "Exit code: 32\n" + "Reason: -\n" + "Stdout: -\n" + "Stderr: mount: unknown filesystem type 'ntfs'") + self.m_mount_cb.side_effect = MountFailedError( + 'Failed mounting %s to %s due to: %s' % + ('/dev/sda', '/fake-tmp/dir', err)) + + value, msg = dsaz.can_dev_be_reformatted('/dev/sda', + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn('cannot mount NTFS, assuming', msg) + + def test_never_destroy_ntfs_config_false(self): + """Normally formattable situation with never_destroy_ntfs set.""" + self.patchup({ + '/dev/sda': { + 'partitions': { + '/dev/sda1': {'num': 1, 'fs': 'ntfs', + 'files': ['dataloss_warning_readme.txt']} + }}}) + value, msg = dsaz.can_dev_be_reformatted("/dev/sda", + preserve_ntfs=True) + self.assertFalse(value) + self.assertIn("config says to never destroy NTFS " + "(datasource.Azure.never_destroy_ntfs)", msg) + class TestAzureNetExists(CiTestCase): -- cgit v1.2.3 From 3b28bdc616f3e7f4d6b419629dc7b9efc3ae8d1e Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 23 May 2018 15:56:16 -0400 Subject: yaml_load/schema: Add invalid line and column nums to error message Yaml tracebacks are generally hard to read for average users. Add a bit of logic to util.yaml_load and schema validation to look for YAMLError.context_mark or problem_mark line and column counts. No longer log the full exception traceback from the yaml_load error, instead just LOG.warning for the specific error and point to the offending line and column where the problem exists. --- cloudinit/config/schema.py | 60 +++++++++++++++++++++-------- cloudinit/util.py | 16 +++++++- tests/unittests/test_handler/test_schema.py | 39 ++++++++++++++++--- tests/unittests/test_util.py | 30 +++++++++++++-- 4 files changed, 118 insertions(+), 27 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 3bad8e22..080a6d06 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False): def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): """Return contents of the cloud-config file annotated with schema errors. - @param cloudconfig: YAML-loaded object from the original_content. + @param cloudconfig: YAML-loaded dict from the original_content or empty + dict if unparseable. @param original_content: The contents of a cloud-config file @param schema_errors: List of tuples from a JSONSchemaValidationError. The tuples consist of (schemapath, error_message).
""" if not schema_errors: return original_content - schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content) + schemapaths = {} + if cloudconfig: + schemapaths = _schemapath_for_cloudconfig( + cloudconfig, original_content) errors_by_line = defaultdict(list) error_count = 1 error_footer = [] annotated_content = [] for path, msg in schema_errors: - errors_by_line[schemapaths[path]].append(msg) + match = re.match(r'format-l(?P\d+)\.c(?P\d+).*', path) + if match: + line, col = match.groups() + errors_by_line[int(line)].append(msg) + else: + col = None + errors_by_line[schemapaths[path]].append(msg) + if col is not None: + msg = 'Line {line} column {col}: {msg}'.format( + line=line, col=col, msg=msg) error_footer.append('# E{0}: {1}'.format(error_count, msg)) error_count += 1 lines = original_content.decode().split('\n') @@ -142,18 +155,31 @@ def validate_cloudconfig_file(config_path, schema, annotate=False): content = load_file(config_path, decode=False) if not content.startswith(CLOUD_CONFIG_HEADER): errors = ( - ('header', 'File {0} needs to begin with "{1}"'.format( + ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format( config_path, CLOUD_CONFIG_HEADER.decode())),) - raise SchemaValidationError(errors) - + error = SchemaValidationError(errors) + if annotate: + print(annotated_cloudconfig_file({}, content, error.schema_errors)) + raise error try: cloudconfig = yaml.safe_load(content) - except yaml.parser.ParserError as e: - errors = ( - ('format', 'File {0} is not valid yaml. {1}'.format( - config_path, str(e))),) - raise SchemaValidationError(errors) - + except (yaml.YAMLError) as e: + line = column = 1 + mark = None + if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): + mark = getattr(e, 'context_mark') + elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): + mark = getattr(e, 'problem_mark') + if mark: + line = mark.line + 1 + column = mark.column + 1 + errors = (('format-l{line}.c{col}'.format(line=line, col=column), + 'File {0} is not valid yaml. {1}'.format( + config_path, str(e))),) + error = SchemaValidationError(errors) + if annotate: + print(annotated_cloudconfig_file({}, content, error.schema_errors)) + raise error try: validate_cloudconfig_schema( cloudconfig, schema, strict=True) @@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content): list_index = 0 RE_YAML_INDENT = r'^(\s*)' scopes = [] - for line_number, line in enumerate(content_lines): + for line_number, line in enumerate(content_lines, 1): indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0]) line = line.strip() if not line or line.startswith('#'): @@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content): scopes.append((indent_depth + 2, key + '.0')) for inner_list_index in range(0, len(yaml.safe_load(value))): list_key = key + '.' 
+ str(inner_list_index) - schema_line_numbers[list_key] = line_number + 1 - schema_line_numbers[key] = line_number + 1 + schema_line_numbers[list_key] = line_number + schema_line_numbers[key] = line_number return schema_line_numbers @@ -337,9 +363,11 @@ def handle_schema_args(name, args): try: validate_cloudconfig_file( args.config_file, full_schema, args.annotate) - except (SchemaValidationError, RuntimeError) as e: + except SchemaValidationError as e: if not args.annotate: error(str(e)) + except RuntimeError as e: + error(str(e)) else: print("Valid cloud-config file {0}".format(args.config_file)) if args.doc: diff --git a/cloudinit/util.py b/cloudinit/util.py index 653ed6ea..c0473b8c 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -874,8 +874,20 @@ def load_yaml(blob, default=None, allowed=(dict,)): " but got %s instead") % (allowed, type_utils.obj_name(converted))) loaded = converted - except (yaml.YAMLError, TypeError, ValueError): - logexc(LOG, "Failed loading yaml blob") + except (yaml.YAMLError, TypeError, ValueError) as e: + msg = 'Failed loading yaml blob' + mark = None + if hasattr(e, 'context_mark') and getattr(e, 'context_mark'): + mark = getattr(e, 'context_mark') + elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'): + mark = getattr(e, 'problem_mark') + if mark: + msg += ( + '. Invalid format at line {line} column {col}: "{err}"'.format( + line=mark.line + 1, col=mark.column + 1, err=e)) + else: + msg += '. {err}'.format(err=e) + LOG.warning(msg) return loaded diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index ac41f124..fb266faf 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -134,22 +134,35 @@ class ValidateCloudConfigFileTest(CiTestCase): with self.assertRaises(SchemaValidationError) as context_mgr: validate_cloudconfig_file(self.config_file, {}) self.assertEqual( - 'Cloud config schema errors: header: File {0} needs to begin with ' - '"{1}"'.format(self.config_file, CLOUD_CONFIG_HEADER.decode()), + 'Cloud config schema errors: format-l1.c1: File {0} needs to begin' + ' with "{1}"'.format( + self.config_file, CLOUD_CONFIG_HEADER.decode()), str(context_mgr.exception)) - def test_validateconfig_file_error_on_non_yaml_format(self): - """On non-yaml format, validate_cloudconfig_file errors.""" + def test_validateconfig_file_error_on_non_yaml_scanner_error(self): + """On non-yaml scan issues, validate_cloudconfig_file errors.""" + # Generate a scanner error by providing text on a single line with + # improper indent. 
+ write_file(self.config_file, '#cloud-config\nasdf:\nasdf') + with self.assertRaises(SchemaValidationError) as context_mgr: + validate_cloudconfig_file(self.config_file, {}) + self.assertIn( + 'schema errors: format-l3.c1: File {0} is not valid yaml.'.format( + self.config_file), + str(context_mgr.exception)) + + def test_validateconfig_file_error_on_non_yaml_parser_error(self): + """On non-yaml parser issues, validate_cloudconfig_file errors.""" write_file(self.config_file, '#cloud-config\n{}}') with self.assertRaises(SchemaValidationError) as context_mgr: validate_cloudconfig_file(self.config_file, {}) self.assertIn( - 'schema errors: format: File {0} is not valid yaml.'.format( + 'schema errors: format-l2.c3: File {0} is not valid yaml.'.format( self.config_file), str(context_mgr.exception)) @skipUnlessJsonSchema() - def test_validateconfig_file_sctricty_validates_schema(self): + def test_validateconfig_file_sctrictly_validates_schema(self): """validate_cloudconfig_file raises errors on invalid schema.""" schema = { 'properties': {'p1': {'type': 'string', 'format': 'hostname'}}} @@ -342,6 +355,20 @@ class MainTest(CiTestCase): 'Expected either --config-file argument or --doc\n', m_stderr.getvalue()) + def test_main_absent_config_file(self): + """Main exits non-zero when config file is absent.""" + myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE'] + with mock.patch('sys.exit', side_effect=self.sys_exit): + with mock.patch('sys.argv', myargs): + with mock.patch('sys.stderr', new_callable=StringIO) as \ + m_stderr: + with self.assertRaises(SystemExit) as context_manager: + main() + self.assertEqual(1, context_manager.exception.code) + self.assertEqual( + 'Configfile NOT_A_FILE does not exist\n', + m_stderr.getvalue()) + def test_main_prints_docs(self): """When --doc parameter is provided, main generates documentation.""" myargs = ['mycmd', '--doc'] diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 84941c7d..d774f3dc 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -4,6 +4,7 @@ from __future__ import print_function import logging import os +import re import shutil import stat import tempfile @@ -265,26 +266,49 @@ class TestGetCmdline(helpers.TestCase): self.assertEqual("abcd 123", ret) -class TestLoadYaml(helpers.TestCase): +class TestLoadYaml(helpers.CiTestCase): mydefault = "7b03a8ebace993d806255121073fed52" + with_logs = True def test_simple(self): mydata = {'1': "one", '2': "two"} self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata) def test_nonallowed_returns_default(self): + '''Any unallowed types result in returning default; log the issue.''' # for now, anything not in the allowed list just returns the default. myyaml = yaml.dump({'1': "one"}) self.assertEqual(util.load_yaml(blob=myyaml, default=self.mydefault, allowed=(str,)), self.mydefault) - - def test_bogus_returns_default(self): + regex = re.compile( + r'Yaml load allows \(<(class|type) \'str\'>,\) root types, but' + r' got dict') + self.assertTrue(regex.search(self.logs.getvalue()), + msg='Missing expected yaml load error') + + def test_bogus_scan_error_returns_default(self): + '''On Yaml scan error, load_yaml returns the default and logs issue.''' badyaml = "1\n 2:" self.assertEqual(util.load_yaml(blob=badyaml, default=self.mydefault), self.mydefault) + self.assertIn( + 'Failed loading yaml blob. 
Invalid format at line 2 column 3:' + ' "mapping values are not allowed here', + self.logs.getvalue()) + + def test_bogus_parse_error_returns_default(self): + '''On Yaml parse error, load_yaml returns default and logs issue.''' + badyaml = "{}}" + self.assertEqual(util.load_yaml(blob=badyaml, + default=self.mydefault), + self.mydefault) + self.assertIn( + 'Failed loading yaml blob. Invalid format at line 1 column 3:' + " \"expected \'<block end>\', but found \'}\'", + self.logs.getvalue()) def test_unsafe_types(self): # should not load complex types -- cgit v1.2.3 From 12799d96f85e210c8e1216a3b06d8a98468fedd7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 23 May 2018 16:04:42 -0400 Subject: tests: Avoid using https in httpretty, improve HTTPretty test case. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On OpenSuSE 42.3, we would get errors running tests/unittests/test_handler/test_handler_chef.py  - test_myhttps_nonet raises an UnmockedError    No mocking was registered, and real connections are not allowed  - test_myhttps_net raises SSLError    ("bad handshake: SysCallError(32, 'EPIPE')",) This fixes the errors by just using http instead of https. Also it modifies the HttprettyTestCase to do the httpretty activate and deactivate itself in setUp and tearDown. Then we don't have to decorate individual test_ methods. Also, we set    httpretty.HTTPretty.allow_net_connect = False Test cases here should not reach out to a network resource. LP: #1771659 --- cloudinit/tests/helpers.py | 8 ++++++++ tests/unittests/test_data.py | 13 +++++++++++-- tests/unittests/test_datasource/test_aliyun.py | 2 -- tests/unittests/test_datasource/test_ec2.py | 12 ------------ tests/unittests/test_datasource/test_gce.py | 1 - tests/unittests/test_datasource/test_openstack.py | 12 ------------ tests/unittests/test_datasource/test_scaleway.py | 3 --- tests/unittests/test_ec2_util.py | 9 --------- tests/unittests/test_handler/test_handler_chef.py | 16 ++++++++++++---- 9 files changed, 31 insertions(+), 45 deletions(-) (limited to 'tests') diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 07059fd4..5bfe7fa4 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -3,6 +3,7 @@ from __future__ import print_function import functools +import httpretty import logging import os import shutil @@ -303,14 +304,21 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): class HttprettyTestCase(CiTestCase): # necessary as http_proxy gets in the way of httpretty # https://github.com/gabrielfalcao/HTTPretty/issues/122 + # Also make sure that allow_net_connect is set to False. + # And make sure reset and enable/disable are done.
def setUp(self): self.restore_proxy = os.environ.get('http_proxy') if self.restore_proxy is not None: del os.environ['http_proxy'] super(HttprettyTestCase, self).setUp() + httpretty.HTTPretty.allow_net_connect = False + httpretty.reset() + httpretty.enable() def tearDown(self): + httpretty.disable() + httpretty.reset() if self.restore_proxy: os.environ['http_proxy'] = self.restore_proxy super(HttprettyTestCase, self).tearDown() diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 275b16d2..91d35cb8 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -524,7 +524,17 @@ c: 4 self.assertEqual(cfg.get('password'), 'gocubs') self.assertEqual(cfg.get('locale'), 'chicago') - @httpretty.activate + +class TestConsumeUserDataHttp(TestConsumeUserData, helpers.HttprettyTestCase): + + def setUp(self): + TestConsumeUserData.setUp(self) + helpers.HttprettyTestCase.setUp(self) + + def tearDown(self): + TestConsumeUserData.tearDown(self) + helpers.HttprettyTestCase.tearDown(self) + @mock.patch('cloudinit.url_helper.time.sleep') def test_include(self, mock_sleep): """Test #include.""" @@ -543,7 +553,6 @@ c: 4 cc = util.load_yaml(cc_contents) self.assertTrue(cc.get('included')) - @httpretty.activate @mock.patch('cloudinit.url_helper.time.sleep') def test_include_bad_url(self, mock_sleep): """Test #include with a bad URL.""" diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py index 4fa9616b..1e77842f 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -130,7 +130,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): self.ds.get_hostname()) @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") - @httpretty.activate def test_with_mock_server(self, m_is_aliyun): m_is_aliyun.return_value = True self.regist_default_server() @@ -143,7 +142,6 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): self._test_host_name() @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") - @httpretty.activate def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): """If is_aliyun returns false, then get_data should return False.""" m_is_aliyun.return_value = False diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index dff8b1ec..497e7610 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -191,7 +191,6 @@ def register_mock_metaserver(base_url, data): register(base_url, 'not found', status=404) def myreg(*argc, **kwargs): - # print("register_url(%s, %s)" % (argc, kwargs)) return httpretty.register_uri(httpretty.GET, *argc, **kwargs) register_helper(myreg, base_url, data) @@ -236,7 +235,6 @@ class TestEc2(test_helpers.HttprettyTestCase): return_value=platform_data) if md: - httpretty.HTTPretty.allow_net_connect = False all_versions = ( [ds.min_metadata_version] + ds.extended_metadata_versions) for version in all_versions: @@ -255,7 +253,6 @@ class TestEc2(test_helpers.HttprettyTestCase): register_mock_metaserver(instance_id_url, None) return ds - @httpretty.activate def test_network_config_property_returns_version_1_network_data(self): """network_config property returns network version 1 for metadata. 
@@ -288,7 +285,6 @@ class TestEc2(test_helpers.HttprettyTestCase): m_get_mac.return_value = mac1 self.assertEqual(expected, ds.network_config) - @httpretty.activate def test_network_config_property_set_dhcp4_on_private_ipv4(self): """network_config property configures dhcp4 on private ipv4 nics. @@ -330,7 +326,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ds._network_config = {'cached': 'data'} self.assertEqual({'cached': 'data'}, ds.network_config) - @httpretty.activate @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): """Refresh the network_config Ec2 cache if network key is absent. @@ -364,7 +359,6 @@ class TestEc2(test_helpers.HttprettyTestCase): 'type': 'physical'}]} self.assertEqual(expected, ds.network_config) - @httpretty.activate def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): """get_instance-id gets DataSourceEc2Local.identity if not present. @@ -397,7 +391,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.metadata = DEFAULT_METADATA self.assertEqual('my-identity-id', ds.get_instance_id()) - @httpretty.activate @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" @@ -409,7 +402,6 @@ class TestEc2(test_helpers.HttprettyTestCase): self.assertTrue(ret) self.assertEqual(0, m_dhcp.call_count) - @httpretty.activate def test_valid_platform_with_strict_false(self): """Valid platform data should return true with strict_id false.""" ds = self._setup_ds( @@ -419,7 +411,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ret = ds.get_data() self.assertTrue(ret) - @httpretty.activate def test_unknown_platform_with_strict_true(self): """Unknown platform data with strict_id true should return False.""" uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' @@ -430,7 +421,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ret = ds.get_data() self.assertFalse(ret) - @httpretty.activate def test_unknown_platform_with_strict_false(self): """Unknown platform data with strict_id false should return True.""" uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' @@ -462,7 +452,6 @@ class TestEc2(test_helpers.HttprettyTestCase): ' not {0}'.format(platform_name)) self.assertIn(message, self.logs.getvalue()) - @httpretty.activate @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD') def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd): """DataSourceEc2Local returns False on BSD. @@ -481,7 +470,6 @@ class TestEc2(test_helpers.HttprettyTestCase): "FreeBSD doesn't support running dhclient with -sf", self.logs.getvalue()) - @httpretty.activate @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.find_fallback_nic') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index eb3cec42..41176c6a 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -78,7 +78,6 @@ def _set_mock_metadata(gce_meta=None): return (404, headers, '') # reset is needed. 
https://github.com/gabrielfalcao/HTTPretty/issues/316 - httpretty.reset() httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback) diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 42c31554..bb180c08 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -135,7 +135,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): super(TestOpenStackDataSource, self).setUp() self.tmp = self.tmp_dir() - @hp.activate def test_successful(self): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) f = _read_metadata_service() @@ -157,7 +156,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', metadata.get('instance-id')) - @hp.activate def test_no_ec2(self): _register_uris(self.VERSION, {}, {}, OS_FILES) f = _read_metadata_service() @@ -168,7 +166,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual({}, f.get('ec2-metadata')) self.assertEqual(2, f.get('version')) - @hp.activate def test_bad_metadata(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -177,7 +174,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.NonReadable, _read_metadata_service) - @hp.activate def test_bad_uuid(self): os_files = copy.deepcopy(OS_FILES) os_meta = copy.deepcopy(OSTACK_META) @@ -188,7 +184,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) - @hp.activate def test_userdata_empty(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -201,7 +196,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) self.assertFalse(f.get('userdata')) - @hp.activate def test_vendordata_empty(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -213,7 +207,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) self.assertFalse(f.get('vendordata')) - @hp.activate def test_vendordata_invalid(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -222,7 +215,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) - @hp.activate def test_metadata_invalid(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -231,7 +223,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) - @hp.activate def test_datasource(self): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, @@ -251,7 +242,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) self.assertIsNone(ds_os.vendordata_raw) - @hp.activate def test_bad_datasource_meta(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -266,7 +256,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertFalse(found) 
self.assertIsNone(ds_os.version) - @hp.activate def test_no_datasource(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -285,7 +274,6 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertFalse(found) self.assertIsNone(ds_os.version) - @hp.activate def test_disabled_datasource(self): os_files = copy.deepcopy(OS_FILES) os_meta = copy.deepcopy(OSTACK_META) diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 8dec06b1..e4e9bb20 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -176,7 +176,6 @@ class TestDataSourceScaleway(HttprettyTestCase): self.vendordata_url = \ DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] - @httpretty.activate @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @@ -212,7 +211,6 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) - @httpretty.activate @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') @@ -236,7 +234,6 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.get_vendordata_raw()) self.assertEqual(sleep.call_count, 0) - @httpretty.activate @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) @mock.patch('cloudinit.util.get_cmdline') diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py index af78997f..3f50f57d 100644 --- a/tests/unittests/test_ec2_util.py +++ b/tests/unittests/test_ec2_util.py @@ -11,7 +11,6 @@ from cloudinit import url_helper as uh class TestEc2Util(helpers.HttprettyTestCase): VERSION = 'latest' - @hp.activate def test_userdata_fetch(self): hp.register_uri(hp.GET, 'http://169.254.169.254/%s/user-data' % (self.VERSION), @@ -20,7 +19,6 @@ class TestEc2Util(helpers.HttprettyTestCase): userdata = eu.get_instance_userdata(self.VERSION) self.assertEqual('stuff', userdata.decode('utf-8')) - @hp.activate def test_userdata_fetch_fail_not_found(self): hp.register_uri(hp.GET, 'http://169.254.169.254/%s/user-data' % (self.VERSION), @@ -28,7 +26,6 @@ class TestEc2Util(helpers.HttprettyTestCase): userdata = eu.get_instance_userdata(self.VERSION, retries=0) self.assertEqual('', userdata) - @hp.activate def test_userdata_fetch_fail_server_dead(self): hp.register_uri(hp.GET, 'http://169.254.169.254/%s/user-data' % (self.VERSION), @@ -36,7 +33,6 @@ class TestEc2Util(helpers.HttprettyTestCase): userdata = eu.get_instance_userdata(self.VERSION, retries=0) self.assertEqual('', userdata) - @hp.activate def test_userdata_fetch_fail_server_not_found(self): hp.register_uri(hp.GET, 'http://169.254.169.254/%s/user-data' % (self.VERSION), @@ -44,7 +40,6 @@ class TestEc2Util(helpers.HttprettyTestCase): userdata = eu.get_instance_userdata(self.VERSION) self.assertEqual('', userdata) - @hp.activate def test_metadata_fetch_no_keys(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) hp.register_uri(hp.GET, base_url, status=200, @@ -62,7 +57,6 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(md['instance-id'], '123') self.assertEqual(md['ami-launch-index'], '1') - @hp.activate def test_metadata_fetch_key(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) 
hp.register_uri(hp.GET, base_url, status=200, @@ -83,7 +77,6 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(md['instance-id'], '123') self.assertEqual(1, len(md['public-keys'])) - @hp.activate def test_metadata_fetch_with_2_keys(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) hp.register_uri(hp.GET, base_url, status=200, @@ -108,7 +101,6 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(md['instance-id'], '123') self.assertEqual(2, len(md['public-keys'])) - @hp.activate def test_metadata_fetch_bdm(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) hp.register_uri(hp.GET, base_url, status=200, @@ -140,7 +132,6 @@ class TestEc2Util(helpers.HttprettyTestCase): self.assertEqual(bdm['ami'], 'sdb') self.assertEqual(bdm['ephemeral0'], 'sdc') - @hp.activate def test_metadata_no_security_credentials(self): base_url = 'http://169.254.169.254/%s/meta-data/' % (self.VERSION) hp.register_uri(hp.GET, base_url, status=200, diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index 0136a93d..f4bbd66d 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -14,19 +14,27 @@ from cloudinit.sources import DataSourceNone from cloudinit import util from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipIf) + HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf) LOG = logging.getLogger(__name__) CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"]) +# This is adjusted to use http because using with https causes issue +# in some openssl/httpretty combinations. +# https://github.com/gabrielfalcao/HTTPretty/issues/242 +# We saw issue in opensuse 42.3 with +# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1 +OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:") -class TestInstallChefOmnibus(CiTestCase): + +class TestInstallChefOmnibus(HttprettyTestCase): def setUp(self): + super(TestInstallChefOmnibus, self).setUp() self.new_root = self.tmp_dir() - @httpretty.activate + @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) def test_install_chef_from_omnibus_runs_chef_url_content(self): """install_chef_from_omnibus runs downloaded OMNIBUS_URL as script.""" chef_outfile = self.tmp_path('chef.out', self.new_root) @@ -65,7 +73,7 @@ class TestInstallChefOmnibus(CiTestCase): expected_subp_kwargs, m_subp_blob.call_args_list[0][1]) - @httpretty.activate + @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) @mock.patch('cloudinit.config.cc_chef.util.subp_blob_in_tempfile') def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob): """install_chef_from_omnibus provides version arg to OMNIBUS_URL.""" -- cgit v1.2.3 From cd1de5f47ab6b82f2c6fd61a5f6681f33b3e5705 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 23 May 2018 16:08:43 -0600 Subject: openstack: Allow discovery in init-local using dhclient in a sandbox. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Network has not yet been configured in the init-local stage so the openstack datasource will use dhcp-client to temporarily obtain an ipv4 address and query the metadata service at http://169.254.169.254 to get network_data.json configuration. If present, the datasource will return network_config version 1 config based on that network_data.json content. 
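In rough pseudocode (an illustrative sketch, not part of the patch: the EphemeralDHCPv4, NoDHCPLeaseError, InvalidMetaDataException and _crawl_metadata names come from the diff below, while the standalone wrapper function is assumed scaffolding), the init-local discovery amounts to:

    from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
    from cloudinit import sources, util

    def crawl_in_dhcp_sandbox(datasource, log):
        """Hold a throwaway DHCP lease just long enough to crawl metadata."""
        try:
            # The context manager configures the fallback nic on entry and
            # tears the address back down on exit, so no network state
            # leaks into the configured system.
            with EphemeralDHCPv4(datasource.fallback_interface):
                return util.log_time(
                    logfunc=log.debug, msg='Crawl of metadata service',
                    func=datasource._crawl_metadata)
        except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
            util.logexc(log, str(e))
            return None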
Previously the OpenStack datasource only set up dhcp on the fallback
interface, so this represents a change in behavior to react to the full
config provided by openstack.

Also significant to OpenStack is the separation of a _crawl_metadata
operation from get_data(). _crawl_metadata walks the available metadata
services and returns a dict of discovered content. get_data consumes the
crawled data, caches it in the datasource and reacts to that data.

/run/cloud-init/instance-data.json now publishes network_data.json or
ec2_metadata keys if that data is present on any datasource.

The main reasons for the separation of crawl from get_data:
 * Enable performance metrics of cloud-init's metadata crawls on each
   boot.
 * Enable cloud-init modules and scripts to query and consume metadata
   content which may have updated/changed after cloud-init's initial cache
   during instance boot. (Think hotplug)

Also generalize common logic to base DataSource class/module:
 * Move a common UNSET variable up into the base datasource module; fix
   EC2, ConfigDrive, OpenStack and SmartOS to use the global.
 * Drop get_url_settings from Ec2, CloudStack and OpenStack and generalize
   DataSource.get_url_params(). Allow subclasses to override url_max_wait,
   url_timeout and url_retries params.
 * Rename get_network_metadata bool to perform_dhcp_setup as it designates
   whether EphemeralDHCPv4 setup is required before crawling metadata.

LP: #1749717
---
 cloudinit/sources/DataSourceCloudStack.py          |  31 ++---
 cloudinit/sources/DataSourceConfigDrive.py         |   4 +-
 cloudinit/sources/DataSourceEc2.py                 |  48 ++-----
 cloudinit/sources/DataSourceOpenStack.py           | 161 ++++++++++++++--------
 cloudinit/sources/DataSourceSmartOS.py             |   9 +-
 cloudinit/sources/__init__.py                      |  76 ++++++++++
 cloudinit/sources/tests/test_init.py               |  89 +++++++++++-
 tests/unittests/test_datasource/test_common.py     |   1 +
 tests/unittests/test_datasource/test_openstack.py  | 121 +++++++++++++++-
 9 files changed, 416 insertions(+), 124 deletions(-)

(limited to 'tests')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 0df545fc..d4b758f2 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource):
 
     dsname = 'CloudStack'
 
+    # Setup read_url parameters per get_url_params.
+    url_max_wait = 120
+    url_timeout = 50
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.seed_dir = os.path.join(paths.seed_dir, 'cs')
@@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource):
             self.metadata_address = "http://%s/" % (self.vr_addr,)
         self.cfg = {}
 
-    def _get_url_settings(self):
-        mcfg = self.ds_cfg
-        max_wait = 120
-        try:
-            max_wait = int(mcfg.get("max_wait", max_wait))
-        except Exception:
-            util.logexc(LOG, "Failed to get max wait.
using %s", max_wait) + def wait_for_metadata_service(self): + url_params = self.get_url_params() - if max_wait == 0: + if url_params.max_wait_seconds <= 0: return False - timeout = 50 - try: - timeout = int(mcfg.get("timeout", timeout)) - except Exception: - util.logexc(LOG, "Failed to get timeout, using %s", timeout) - - return (max_wait, timeout) - - def wait_for_metadata_service(self): - (max_wait, timeout) = self._get_url_settings() - urls = [uhelp.combine_url(self.metadata_address, 'latest/meta-data/instance-id')] start_time = time.time() - url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, status_cb=LOG.warn) + url = uhelp.wait_for_url( + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warn) if url: LOG.debug("Using metadata source: '%s'", url) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 121cf215..4cb28977 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): self.version = None self.ec2_metadata = None self._network_config = None - self.network_json = None + self.network_json = sources.UNSET self.network_eni = None self.known_macs = None self.files = {} @@ -149,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): @property def network_config(self): if self._network_config is None: - if self.network_json is not None: + if self.network_json not in (None, sources.UNSET): LOG.debug("network config provided via network_json") self._network_config = openstack.convert_net_json( self.network_json, known_macs=self.known_macs) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 21e9ef84..968ab3f7 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -27,8 +27,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND]) STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") STRICT_ID_DEFAULT = "warn" -_unset = "_unset" - class Platforms(object): # TODO Rename and move to cloudinit.cloud.CloudNames @@ -59,15 +57,16 @@ class DataSourceEc2(sources.DataSource): # for extended metadata content. IPv6 support comes in 2016-09-02 extended_metadata_versions = ['2016-09-02'] + # Setup read_url parameters per get_url_params. + url_max_wait = 120 + url_timeout = 50 + _cloud_platform = None - _network_config = _unset # Used for caching calculated network config v1 + _network_config = sources.UNSET # Used to cache calculated network cfg v1 # Whether we want to get network configuration from the metadata service. - get_network_metadata = False - - # Track the discovered fallback nic for use in configuration generation. - _fallback_interface = None + perform_dhcp_setup = False def __init__(self, sys_cfg, distro, paths): super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) @@ -98,7 +97,7 @@ class DataSourceEc2(sources.DataSource): elif self.cloud_platform == Platforms.NO_EC2_METADATA: return False - if self.get_network_metadata: # Setup networking in init-local stage. + if self.perform_dhcp_setup: # Setup networking in init-local stage. 
if util.is_FreeBSD(): LOG.debug("FreeBSD doesn't support running dhclient with -sf") return False @@ -158,27 +157,11 @@ class DataSourceEc2(sources.DataSource): else: return self.metadata['instance-id'] - def _get_url_settings(self): - mcfg = self.ds_cfg - max_wait = 120 - try: - max_wait = int(mcfg.get("max_wait", max_wait)) - except Exception: - util.logexc(LOG, "Failed to get max wait. using %s", max_wait) - - timeout = 50 - try: - timeout = max(0, int(mcfg.get("timeout", timeout))) - except Exception: - util.logexc(LOG, "Failed to get timeout, using %s", timeout) - - return (max_wait, timeout) - def wait_for_metadata_service(self): mcfg = self.ds_cfg - (max_wait, timeout) = self._get_url_settings() - if max_wait <= 0: + url_params = self.get_url_params() + if url_params.max_wait_seconds <= 0: return False # Remove addresses from the list that wont resolve. @@ -205,7 +188,8 @@ class DataSourceEc2(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url( - urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn) + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warn) if url: self.metadata_address = url2base[url] @@ -310,11 +294,11 @@ class DataSourceEc2(sources.DataSource): @property def network_config(self): """Return a network config dict for rendering ENI or netplan files.""" - if self._network_config != _unset: + if self._network_config != sources.UNSET: return self._network_config if self.metadata is None: - # this would happen if get_data hadn't been called. leave as _unset + # this would happen if get_data hadn't been called. leave as UNSET LOG.warning( "Unexpected call to network_config when metadata is None.") return None @@ -353,9 +337,7 @@ class DataSourceEc2(sources.DataSource): self._fallback_interface = _legacy_fbnic self.fallback_nic = None else: - self._fallback_interface = net.find_fallback_nic() - if self._fallback_interface is None: - LOG.warning("Did not find a fallback interface on EC2.") + return super(DataSourceEc2, self).fallback_interface return self._fallback_interface def _crawl_metadata(self): @@ -390,7 +372,7 @@ class DataSourceEc2Local(DataSourceEc2): metadata service. If the metadata service provides network configuration then render the network configuration for that instance based on metadata. """ - get_network_metadata = True # Get metadata network config if present + perform_dhcp_setup = True # Use dhcp before querying metadata def get_data(self): supported_platforms = (Platforms.AWS,) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index fb166ae1..1a12a3f1 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -7,6 +7,7 @@ import time from cloudinit import log as logging +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit import sources from cloudinit import url_helper from cloudinit import util @@ -27,46 +28,25 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): dsname = "OpenStack" + _network_config = sources.UNSET # Used to cache calculated network cfg v1 + + # Whether we want to get network configuration from the metadata service. 
+ perform_dhcp_setup = False + def __init__(self, sys_cfg, distro, paths): super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) self.metadata_address = None self.ssl_details = util.fetch_ssl_details(self.paths) self.version = None self.files = {} - self.ec2_metadata = None + self.ec2_metadata = sources.UNSET + self.network_json = sources.UNSET def __str__(self): root = sources.DataSource.__str__(self) mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version) return mstr - def _get_url_settings(self): - # TODO(harlowja): this is shared with ec2 datasource, we should just - # move it to a shared location instead... - # Note: the defaults here are different though. - - # max_wait < 0 indicates do not wait - max_wait = -1 - timeout = 10 - retries = 5 - - try: - max_wait = int(self.ds_cfg.get("max_wait", max_wait)) - except Exception: - util.logexc(LOG, "Failed to get max wait. using %s", max_wait) - - try: - timeout = max(0, int(self.ds_cfg.get("timeout", timeout))) - except Exception: - util.logexc(LOG, "Failed to get timeout, using %s", timeout) - - try: - retries = int(self.ds_cfg.get("retries", retries)) - except Exception: - util.logexc(LOG, "Failed to get retries. using %s", retries) - - return (max_wait, timeout, retries) - def wait_for_metadata_service(self): urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL]) filtered = [x for x in urls if util.is_resolvable_url(x)] @@ -86,10 +66,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): md_urls.append(md_url) url2base[md_url] = url - (max_wait, timeout, _retries) = self._get_url_settings() + url_params = self.get_url_params() start_time = time.time() - avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, - timeout=timeout) + avail_url = url_helper.wait_for_url( + urls=md_urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds) if avail_url: LOG.debug("Using metadata source: '%s'", url2base[avail_url]) else: @@ -99,38 +80,64 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.metadata_address = url2base.get(avail_url) return bool(avail_url) - def _get_data(self): - try: - if not self.wait_for_metadata_service(): - return False - except IOError: - return False + def check_instance_id(self, sys_cfg): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) - (_max_wait, timeout, retries) = self._get_url_settings() + @property + def network_config(self): + """Return a network config dict for rendering ENI or netplan files.""" + if self._network_config != sources.UNSET: + return self._network_config + + # RELEASE_BLOCKER: SRU to Xenial and Artful SRU should not provide + # network_config by default unless configured in /etc/cloud/cloud.cfg*. + # Patch Xenial and Artful before release to default to False. + if util.is_false(self.ds_cfg.get('apply_network_config', True)): + self._network_config = None + return self._network_config + if self.network_json == sources.UNSET: + # this would happen if get_data hadn't been called. 
leave as UNSET + LOG.warning( + 'Unexpected call to network_config when network_json is None.') + return None + + LOG.debug('network config provided via network_json') + self._network_config = openstack.convert_net_json( + self.network_json, known_macs=None) + return self._network_config - try: - results = util.log_time(LOG.debug, - 'Crawl of openstack metadata service', - read_metadata_service, - args=[self.metadata_address], - kwargs={'ssl_details': self.ssl_details, - 'retries': retries, - 'timeout': timeout}) - except openstack.NonReadable: - return False - except (openstack.BrokenMetadata, IOError): - util.logexc(LOG, "Broken metadata address %s", - self.metadata_address) - return False + def _get_data(self): + """Crawl metadata, parse and persist that data for this instance. + + @return: True when metadata discovered indicates OpenStack datasource. + False when unable to contact metadata service or when metadata + format is invalid or disabled. + """ + if self.perform_dhcp_setup: # Setup networking in init-local stage. + try: + with EphemeralDHCPv4(self.fallback_interface): + results = util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self._crawl_metadata) + except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e: + util.logexc(LOG, str(e)) + return False + else: + try: + results = self._crawl_metadata() + except sources.InvalidMetaDataException as e: + util.logexc(LOG, str(e)) + return False self.dsmode = self._determine_dsmode([results.get('dsmode')]) if self.dsmode == sources.DSMODE_DISABLED: return False - md = results.get('metadata', {}) md = util.mergemanydict([md, DEFAULT_METADATA]) self.metadata = md self.ec2_metadata = results.get('ec2-metadata') + self.network_json = results.get('networkdata') self.userdata_raw = results.get('userdata') self.version = results['version'] self.files.update(results.get('files', {})) @@ -145,9 +152,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): return True - def check_instance_id(self, sys_cfg): - # quickly (local check only) if self.instance_id is still valid - return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def _crawl_metadata(self): + """Crawl metadata service when available. + + @returns: Dictionary with all metadata discovered for this datasource. + @raise: InvalidMetaDataException on unreadable or broken + metadata. + """ + try: + if not self.wait_for_metadata_service(): + raise sources.InvalidMetaDataException( + 'No active metadata service found') + except IOError as e: + raise sources.InvalidMetaDataException( + 'IOError contacting metadata service: {error}'.format( + error=str(e))) + + url_params = self.get_url_params() + + try: + result = util.log_time( + LOG.debug, 'Crawl of openstack metadata service', + read_metadata_service, args=[self.metadata_address], + kwargs={'ssl_details': self.ssl_details, + 'retries': url_params.num_retries, + 'timeout': url_params.timeout_seconds}) + except openstack.NonReadable as e: + raise sources.InvalidMetaDataException(str(e)) + except (openstack.BrokenMetadata, IOError): + msg = 'Broken metadata address {addr}'.format( + addr=self.metadata_address) + raise sources.InvalidMetaDataException(msg) + return result + + +class DataSourceOpenStackLocal(DataSourceOpenStack): + """Run in init-local using a dhcp discovery prior to metadata crawl. + + In init-local, no network is available. This subclass sets up minimal + networking with dhclient on a viable nic so that it can talk to the + metadata service. 
If the metadata service provides network configuration + then render the network configuration for that instance based on metadata. + """ + + perform_dhcp_setup = True # Get metadata network config if present def read_metadata_service(base_url, ssl_details=None, @@ -159,6 +207,7 @@ def read_metadata_service(base_url, ssl_details=None, # Used to match classes to dependencies datasources = [ + (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)), (DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index fcb46b14..c91e4d59 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -165,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource): dsname = "Joyent" - _unset = "_unset" - smartos_type = _unset - md_client = _unset + smartos_type = sources.UNSET + md_client = sources.UNSET def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -189,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource): return "%s [client=%s]" % (root, self.md_client) def _init(self): - if self.smartos_type == self._unset: + if self.smartos_type == sources.UNSET: self.smartos_type = get_smartos_environ() if self.smartos_type is None: self.md_client = None - if self.md_client == self._unset: + if self.md_client == sources.UNSET: self.md_client = jmc_client_factory( smartos_type=self.smartos_type, metadata_sockfile=self.ds_cfg['metadata_sockfile'], diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index df0b374a..90d74575 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -9,6 +9,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import abc +from collections import namedtuple import copy import json import os @@ -17,6 +18,7 @@ import six from cloudinit.atomic_helper import write_json from cloudinit import importer from cloudinit import log as logging +from cloudinit import net from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util @@ -41,6 +43,8 @@ INSTANCE_JSON_FILE = 'instance-data.json' # Key which can be provide a cloud's official product name to cloud-init METADATA_CLOUD_NAME_KEY = 'cloud-name' +UNSET = "_unset" + LOG = logging.getLogger(__name__) @@ -48,6 +52,11 @@ class DataSourceNotFoundException(Exception): pass +class InvalidMetaDataException(Exception): + """Raised when metadata is broken, unavailable or disabled.""" + pass + + def process_base64_metadata(metadata, key_path=''): """Strip ci-b64 prefix and return metadata with base64-encoded-keys set.""" md_copy = copy.deepcopy(metadata) @@ -68,6 +77,10 @@ def process_base64_metadata(metadata, key_path=''): return md_copy +URLParams = namedtuple( + 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) + + @six.add_metaclass(abc.ABCMeta) class DataSource(object): @@ -81,6 +94,14 @@ class DataSource(object): # Cached cloud_name as determined by _get_cloud_name _cloud_name = None + # Track the discovered fallback nic for use in configuration generation. 
+    _fallback_interface = None
+
+    # read_url_params
+    url_max_wait = -1  # max_wait < 0 means do not wait
+    url_timeout = 10  # timeout for each metadata url read attempt
+    url_retries = 5  # number of times to retry url upon 404
+
     def __init__(self, sys_cfg, distro, paths, ud_proc=None):
         self.sys_cfg = sys_cfg
         self.distro = distro
@@ -128,6 +149,14 @@ class DataSource(object):
                 'meta-data': self.metadata,
                 'user-data': self.get_userdata_raw(),
                 'vendor-data': self.get_vendordata_raw()}}
+        if hasattr(self, 'network_json'):
+            network_json = getattr(self, 'network_json')
+            if network_json != UNSET:
+                instance_data['ds']['network_json'] = network_json
+        if hasattr(self, 'ec2_metadata'):
+            ec2_metadata = getattr(self, 'ec2_metadata')
+            if ec2_metadata != UNSET:
+                instance_data['ds']['ec2_metadata'] = ec2_metadata
         instance_data.update(
             self._get_standardized_metadata())
         try:
@@ -149,6 +178,42 @@ class DataSource(object):
             'Subclasses of DataSource must implement _get_data which'
             ' sets self.metadata, vendordata_raw and userdata_raw.')
 
+    def get_url_params(self):
+        """Return the Datasource's preferred url_read parameters.
+
+        Subclasses may override url_max_wait, url_timeout, url_retries.
+
+        @return: A URLParams object with max_wait_seconds, timeout_seconds,
+            num_retries.
+        """
+        max_wait = self.url_max_wait
+        try:
+            max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
+        except ValueError:
+            util.logexc(
+                LOG, "Config max_wait '%s' is not an int, using default '%s'",
+                self.ds_cfg.get("max_wait"), max_wait)
+
+        timeout = self.url_timeout
+        try:
+            timeout = max(
+                0, int(self.ds_cfg.get("timeout", self.url_timeout)))
+        except ValueError:
+            timeout = self.url_timeout
+            util.logexc(
+                LOG, "Config timeout '%s' is not an int, using default '%s'",
+                self.ds_cfg.get('timeout'), timeout)
+
+        retries = self.url_retries
+        try:
+            retries = int(self.ds_cfg.get("retries", self.url_retries))
+        except Exception:
+            util.logexc(
+                LOG, "Config retries '%s' is not an int, using default '%s'",
+                self.ds_cfg.get('retries'), retries)
+
+        return URLParams(max_wait, timeout, retries)
+
     def get_userdata(self, apply_filter=False):
         if self.userdata is None:
             self.userdata = self.ud_proc.process(self.get_userdata_raw())
@@ -161,6 +226,17 @@ class DataSource(object):
             self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
         return self.vendordata
 
+    @property
+    def fallback_interface(self):
+        """Determine the network interface used during local network config."""
+        if self._fallback_interface is None:
+            self._fallback_interface = net.find_fallback_nic()
+            if self._fallback_interface is None:
+                LOG.warning(
+                    "Did not find a fallback interface on %s.",
+                    self.cloud_name)
+        return self._fallback_interface
+
     @property
     def cloud_name(self):
         """Return lowercase cloud name as determined by the datasource.
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 452e9219..d5bc98a4 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -17,6 +17,7 @@ from cloudinit import util class DataSourceTestSubclassNet(DataSource): dsname = 'MyTestSubclass' + url_max_wait = 55 def __init__(self, sys_cfg, distro, paths, custom_userdata=None): super(DataSourceTestSubclassNet, self).__init__( @@ -70,8 +71,7 @@ class TestDataSource(CiTestCase): """Init uses DataSource.dsname for sourcing ds_cfg.""" sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} distro = 'distrotest' # generally should be a Distro object - paths = Paths({}) - datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths) + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) self.assertEqual({'key2': False}, datasource.ds_cfg) def test_str_is_classname(self): @@ -81,6 +81,91 @@ class TestDataSource(CiTestCase): 'DataSourceTestSubclassNet', str(DataSourceTestSubclassNet('', '', self.paths))) + def test_datasource_get_url_params_defaults(self): + """get_url_params default url config settings for the datasource.""" + params = self.datasource.get_url_params() + self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait) + self.assertEqual(params.timeout_seconds, self.datasource.url_timeout) + self.assertEqual(params.num_retries, self.datasource.url_retries) + + def test_datasource_get_url_params_subclassed(self): + """Subclasses can override get_url_params defaults.""" + sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}} + distro = 'distrotest' # generally should be a Distro object + datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) + expected = (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries) + url_params = datasource.get_url_params() + self.assertNotEqual(self.datasource.get_url_params(), url_params) + self.assertEqual(expected, url_params) + + def test_datasource_get_url_params_ds_config_override(self): + """Datasource configuration options can override url param defaults.""" + sys_cfg = { + 'datasource': { + 'MyTestSubclass': { + 'max_wait': '1', 'timeout': '2', 'retries': '3'}}} + datasource = DataSourceTestSubclassNet( + sys_cfg, self.distro, self.paths) + expected = (1, 2, 3) + url_params = datasource.get_url_params() + self.assertNotEqual( + (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries), + url_params) + self.assertEqual(expected, url_params) + + def test_datasource_get_url_params_is_zero_or_greater(self): + """get_url_params ignores timeouts with a value below 0.""" + # Set an override that is below 0 which gets ignored. 
+ sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}} + datasource = DataSource(sys_cfg, self.distro, self.paths) + (_max_wait, timeout, _retries) = datasource.get_url_params() + self.assertEqual(0, timeout) + + def test_datasource_get_url_uses_defaults_on_errors(self): + """On invalid system config values for url_params defaults are used.""" + # All invalid values should be logged + sys_cfg = {'datasource': { + '_undef': { + 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}} + datasource = DataSource(sys_cfg, self.distro, self.paths) + url_params = datasource.get_url_params() + expected = (datasource.url_max_wait, datasource.url_timeout, + datasource.url_retries) + self.assertEqual(expected, url_params) + logs = self.logs.getvalue() + expected_logs = [ + "Config max_wait 'nope' is not an int, using default '-1'", + "Config timeout 'bug' is not an int, using default '10'", + "Config retries 'nonint' is not an int, using default '5'", + ] + for log in expected_logs: + self.assertIn(log, logs) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_fallback_interface_is_discovered(self, m_get_fallback_nic): + """The fallback_interface is discovered via find_fallback_nic.""" + m_get_fallback_nic.return_value = 'nic9' + self.assertEqual('nic9', self.datasource.fallback_interface) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic): + """Log a warning when fallback_interface can not discover the nic.""" + self.datasource._cloud_name = 'MySupahCloud' + m_get_fallback_nic.return_value = None # Couldn't discover nic + self.assertIsNone(self.datasource.fallback_interface) + self.assertEqual( + 'WARNING: Did not find a fallback interface on MySupahCloud.\n', + self.logs.getvalue()) + + @mock.patch('cloudinit.sources.net.find_fallback_nic') + def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic): + """The fallback_interface is cached and won't be rediscovered.""" + self.datasource._fallback_interface = 'nic10' + self.assertEqual('nic10', self.datasource.fallback_interface) + m_get_fallback_nic.assert_not_called() + def test__get_data_unimplemented(self): """Raise an error when _get_data is not implemented.""" with self.assertRaises(NotImplementedError) as context_manager: diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index ec333888..0d35dc29 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -40,6 +40,7 @@ DEFAULT_LOCAL = [ OVF.DataSourceOVF, SmartOS.DataSourceSmartOS, Ec2.DataSourceEc2Local, + OpenStack.DataSourceOpenStackLocal, ] DEFAULT_NETWORK = [ diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index bb180c08..fad73b21 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -16,7 +16,7 @@ from six import StringIO from cloudinit import helpers from cloudinit import settings -from cloudinit.sources import convert_vendordata +from cloudinit.sources import convert_vendordata, UNSET from cloudinit.sources import DataSourceOpenStack as ds from cloudinit.sources.helpers import openstack from cloudinit import util @@ -129,6 +129,8 @@ def _read_metadata_service(): class TestOpenStackDataSource(test_helpers.HttprettyTestCase): + + with_logs = True VERSION = 'latest' def setUp(self): @@ -223,11 +225,11 @@ class 
TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(openstack.BrokenMetadata, _read_metadata_service) - def test_datasource(self): + @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_datasource(self, m_dhcp): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) - ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, - None, - helpers.Paths({'run_dir': self.tmp})) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) found = ds_os.get_data() self.assertTrue(found) @@ -241,6 +243,36 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(2, len(ds_os.files)) self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) self.assertIsNone(ds_os.vendordata_raw) + m_dhcp.assert_not_called() + + @hp.activate + @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + def test_local_datasource(self, m_dhcp, m_net): + """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data.""" + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os_local = ds.DataSourceOpenStackLocal( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp + m_dhcp.return_value = [{ + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'broadcast-address': '192.168.2.255'}] + + self.assertIsNone(ds_os_local.version) + found = ds_os_local.get_data() + self.assertTrue(found) + self.assertEqual(2, ds_os_local.version) + md = dict(ds_os_local.metadata) + md.pop('instance-id', None) + md.pop('local-hostname', None) + self.assertEqual(OSTACK_META, md) + self.assertEqual(EC2_META, ds_os_local.ec2_metadata) + self.assertEqual(USER_DATA, ds_os_local.userdata_raw) + self.assertEqual(2, len(ds_os_local.files)) + self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure) + self.assertIsNone(ds_os_local.vendordata_raw) + m_dhcp.assert_called_with('eth9') def test_bad_datasource_meta(self): os_files = copy.deepcopy(OS_FILES) @@ -255,6 +287,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) + self.assertIn( + 'InvalidMetaDataException: Broken metadata address' + ' http://169.254.169.25', + self.logs.getvalue()) def test_no_datasource(self): os_files = copy.deepcopy(OS_FILES) @@ -274,6 +310,52 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertFalse(found) self.assertIsNone(ds_os.version) + def test_network_config_disabled_by_datasource_config(self): + """The network_config can be disabled from datasource config.""" + mock_path = ( + 'cloudinit.sources.DataSourceOpenStack.openstack.' 
+ 'convert_net_json') + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os.ds_cfg = {'apply_network_config': False} + sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], + 'networks': [], 'services': []} + ds_os.network_json = sample_json # Ignore this content from metadata + with test_helpers.mock.patch(mock_path) as m_convert_json: + self.assertIsNone(ds_os.network_config) + m_convert_json.assert_not_called() + + def test_network_config_from_network_json(self): + """The datasource gets network_config from network_data.json.""" + mock_path = ( + 'cloudinit.sources.DataSourceOpenStack.openstack.' + 'convert_net_json') + example_cfg = {'version': 1, 'config': []} + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], + 'networks': [], 'services': []} + ds_os.network_json = sample_json + with test_helpers.mock.patch(mock_path) as m_convert_json: + m_convert_json.return_value = example_cfg + self.assertEqual(example_cfg, ds_os.network_config) + self.assertIn( + 'network config provided via network_json', self.logs.getvalue()) + m_convert_json.assert_called_with(sample_json, known_macs=None) + + def test_network_config_cached(self): + """The datasource caches the network_config property.""" + mock_path = ( + 'cloudinit.sources.DataSourceOpenStack.openstack.' + 'convert_net_json') + example_cfg = {'version': 1, 'config': []} + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + ds_os._network_config = example_cfg + with test_helpers.mock.patch(mock_path) as m_convert_json: + self.assertEqual(example_cfg, ds_os.network_config) + m_convert_json.assert_not_called() + def test_disabled_datasource(self): os_files = copy.deepcopy(OS_FILES) os_meta = copy.deepcopy(OSTACK_META) @@ -296,6 +378,35 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertFalse(found) self.assertIsNone(ds_os.version) + @hp.activate + def test_wb__crawl_metadata_does_not_persist(self): + """_crawl_metadata returns current metadata and does not cache.""" + _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) + ds_os = ds.DataSourceOpenStack( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + crawled_data = ds_os._crawl_metadata() + self.assertEqual(UNSET, ds_os.ec2_metadata) + self.assertIsNone(ds_os.userdata_raw) + self.assertEqual(0, len(ds_os.files)) + self.assertIsNone(ds_os.vendordata_raw) + self.assertEqual( + ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata', + 'userdata', 'vendordata', 'version'], + sorted(crawled_data.keys())) + self.assertEqual('local', crawled_data['dsmode']) + self.assertEqual(EC2_META, crawled_data['ec2-metadata']) + self.assertEqual(2, len(crawled_data['files'])) + md = copy.deepcopy(crawled_data['metadata']) + md.pop('instance-id') + md.pop('local-hostname') + self.assertEqual(OSTACK_META, md) + self.assertEqual( + json.loads(OS_FILES['openstack/latest/network_data.json']), + crawled_data['networkdata']) + self.assertEqual(USER_DATA, crawled_data['userdata']) + self.assertEqual(VENDOR_DATA, crawled_data['vendordata']) + self.assertEqual(2, crawled_data['version']) + class TestVendorDataLoading(test_helpers.TestCase): def cvj(self, data): -- cgit v1.2.3 From 3f99f4aba8af559f99f1d46b2b46bea7622da1d0 Mon Sep 17 00:00:00 2001 From: Dan McDonald Date: Thu, 24 May 2018 10:54:18 -0400 Subject: Enable SmartOS network 
metadata to work with netplan via per-subnet routes - Updated datadict reference URL - Store sdc:routes metadata in DatasourceSmartOS - Map sdc:routes values to per-interface subnet configuration - Added unittest Co-authored-by: Mike Gerdts LP: #1763512 --- cloudinit/sources/DataSourceSmartOS.py | 46 ++++++++++++++++++++++--- tests/unittests/test_datasource/test_smartos.py | 26 ++++++++++++++ 2 files changed, 67 insertions(+), 5 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index c91e4d59..f92e8b5c 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -17,7 +17,7 @@ # of a serial console. # # Certain behavior is defined by the DataDictionary -# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html +# https://eng.joyent.com/mdata/datadict.html # Comments with "@datadictionary" are snippets of the definition import base64 @@ -298,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource): self.userdata_raw = ud self.vendordata_raw = md['vendor-data'] self.network_data = md['network-data'] + self.routes_data = md['routes'] self._set_provisioned() return True @@ -321,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource): convert_smartos_network_data( network_data=self.network_data, dns_servers=self.metadata['dns_servers'], - dns_domain=self.metadata['dns_domain'])) + dns_domain=self.metadata['dns_domain'], + routes=self.routes_data)) return self._network_config @@ -760,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None): # Convert SMARTOS 'sdc:nics' data to network_config yaml def convert_smartos_network_data(network_data=None, - dns_servers=None, dns_domain=None): + dns_servers=None, dns_domain=None, + routes=None): """Return a dictionary of network_config by parsing provided SMARTOS sdc:nics configuration data @@ -778,6 +781,10 @@ def convert_smartos_network_data(network_data=None, keys are related to ip configuration. For each ip in the 'ips' list we create a subnet entry under 'subnets' pairing the ip to a one in the 'gateways' list. + + Each route in sdc:routes is mapped to a route on each interface. + The sdc:routes properties 'dst' and 'gateway' map to 'network' and + 'gateway'. The 'linklocal' sdc:routes property is ignored. """ valid_keys = { @@ -800,6 +807,10 @@ def convert_smartos_network_data(network_data=None, 'scope', 'type', ], + 'route': [ + 'network', + 'gateway', + ], } if dns_servers: @@ -814,6 +825,9 @@ def convert_smartos_network_data(network_data=None, else: dns_domain = [] + if not routes: + routes = [] + def is_valid_ipv4(addr): return '.' in addr @@ -840,6 +854,7 @@ def convert_smartos_network_data(network_data=None, if ip == "dhcp": subnet = {'type': 'dhcp4'} else: + routeents = [] subnet = dict((k, v) for k, v in nic.items() if k in valid_keys['subnet']) subnet.update({ @@ -861,6 +876,25 @@ def convert_smartos_network_data(network_data=None, pgws[proto]['gw'] = gateways[0] subnet.update({'gateway': pgws[proto]['gw']}) + for route in routes: + rcfg = dict((k, v) for k, v in route.items() + if k in valid_keys['route']) + # Linux uses the value of 'gateway' to determine + # automatically if the route is a forward/next-hop + # (non-local IP for gateway) or an interface/resolver + # (local IP for gateway). So we can ignore the + # 'interface' attribute of sdc:routes, because SDC + # guarantees that the gateway is a local IP for + # "interface=true". 
+ # + # Eventually we should be smart and compare "gateway" + # to see if it's in the prefix. We can then smartly + # add or not-add this route. But for now, + # when in doubt, use brute force! Routes for everyone! + rcfg.update({'network': route['dst']}) + routeents.append(rcfg) + subnet.update({'routes': routeents}) + subnets.append(subnet) cfg.update({'subnets': subnets}) config.append(cfg) @@ -904,12 +938,14 @@ if __name__ == "__main__": keyname = SMARTOS_ATTRIB_JSON[key] data[key] = client.get_json(keyname) elif key == "network_config": - for depkey in ('network-data', 'dns_servers', 'dns_domain'): + for depkey in ('network-data', 'dns_servers', 'dns_domain', + 'routes'): load_key(client, depkey, data) data[key] = convert_smartos_network_data( network_data=data['network-data'], dns_servers=data['dns_servers'], - dns_domain=data['dns_domain']) + dns_domain=data['dns_domain'], + routes=data['routes']) else: if key in SMARTOS_ATTRIB_MAP: keyname, strip = SMARTOS_ATTRIB_MAP[key] diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 706e8eb8..dca0b3d4 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -1027,6 +1027,32 @@ class TestNetworkConversion(TestCase): found = convert_net(SDC_NICS_SINGLE_GATEWAY) self.assertEqual(expected, found) + def test_routes_on_all_nics(self): + routes = [ + {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'}, + {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}] + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '8.12.42.26/24', + 'gateway': '8.12.42.1', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 'gateway': '8.12.42.3'}, + {'network': '4.0.0.0/8', + 'gateway': '10.210.1.4'}]}]}, + {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.27/24', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 'gateway': '8.12.42.3'}, + {'network': '4.0.0.0/8', + 'gateway': '10.210.1.4'}]}]}]} + found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes) + self.maxDiff = None + self.assertEqual(expected, found) + @unittest2.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM, "Only supported on KVM and bhyve guests under SmartOS") -- cgit v1.2.3 From 9a41fce0c2399f05b2c904948c969623dc04e491 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 8 Jun 2018 12:11:31 -0400 Subject: subp: support combine_capture argument. This adds 'combine_capture' argument as was present in curtin's subp. It is useful to get interleaved output of a command. I noticed a need for it when looking at user_data_rhevm in DataSourceAltCloud. That will run a subcommand, logging its stdout but swallowing its stderr. Another thing to change to use this would be in udevadm_settle which currently just returns the subp() call. Also, add the docstring copied from curtin's subp. 
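As a hedged usage sketch (the shell one-liner here is an assumption used
only for illustration; the combine_capture semantics come from the
docstring added in the diff below), interleaving looks like:

    from cloudinit import util

    # stderr is folded into stdout, so 'out' carries the interleaved
    # stream and 'err' comes back as the empty string (decode is on by
    # default, so both values are strings rather than bytes).
    out, err = util.subp(
        ['sh', '-c', 'echo to-stdout; echo to-stderr 1>&2'],
        combine_capture=True)
    assert 'to-stdout' in out and 'to-stderr' in out
    assert err == ''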
---
 cloudinit/util.py            | 63 +++++++++++++++++++++++++++++++++++++++-----
 tests/unittests/test_util.py |  8 ++++++
 2 files changed, 65 insertions(+), 6 deletions(-)

(limited to 'tests')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index d9b61cfe..0017de72 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1876,9 +1876,55 @@ def subp_blob_in_tempfile(blob, *args, **kwargs):
         return subp(*args, **kwargs)
 
 
-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
+def subp(args, data=None, rcs=None, env=None, capture=True,
+         combine_capture=False, shell=False,
          logstring=False, decode="replace", target=None, update_env=None,
          status_cb=None):
+    """Run a subprocess.
+
+    :param args: command to run in a list. [cmd, arg1, arg2...]
+    :param data: input to the command, made available on its stdin.
+    :param rcs:
+        a list of allowed return codes. If subprocess exits with a value not
+        in this list, a ProcessExecutionError will be raised. By default,
+        data is returned as a string. See 'decode' parameter.
+    :param env: a dictionary for the command's environment.
+    :param capture:
+        boolean indicating if output should be captured. If True, then stderr
+        and stdout will be returned. If False, they will not be redirected.
+    :param combine_capture:
+        boolean indicating if stderr should be redirected to stdout. When
+        True, interleaved stderr and stdout will be returned as the first
+        element of a tuple, the second will be empty string or bytes (per
+        decode). If combine_capture is True, then output is captured
+        independent of the value of capture.
+    :param shell: boolean indicating if this should be run with a shell.
+    :param logstring:
+        the command will be logged to DEBUG. If it contains info that should
+        not be logged, then logstring will be logged instead.
+    :param decode:
+        if False, no decoding will be done and returned stdout and stderr will
+        be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
+        These values are passed through to bytes().decode() as the 'errors'
+        parameter. There is no support for decoding to other than utf-8.
+    :param target:
+        not supported, kwarg present only to make function signature similar
+        to curtin's subp.
+    :param update_env:
+        update the environment for this command with this dictionary.
+        this will not affect the current process's os.environ.
+    :param status_cb:
+        call this function with a single string argument before starting
+        and after finishing.
+
+    :return
+        if not capturing, return is (None, None)
+        if capturing, stdout and stderr are returned.
+ if decode: + entries in tuple will be python2 unicode or python3 string + if not decode: + entries in tuple will be python2 string or python3 bytes + """ # not supported in cloud-init (yet), for now kept in the call signature # to ease maintaining code shared between cloud-init and curtin @@ -1904,7 +1950,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, status_cb('Begin run command: {command}\n'.format(command=command)) if not logstring: LOG.debug(("Running command %s with allowed return codes %s" - " (shell=%s, capture=%s)"), args, rcs, shell, capture) + " (shell=%s, capture=%s)"), + args, rcs, shell, 'combine' if combine_capture else capture) else: LOG.debug(("Running hidden command to protect sensitive " "input/output logstring: %s"), logstring) @@ -1915,6 +1962,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, if capture: stdout = subprocess.PIPE stderr = subprocess.PIPE + if combine_capture: + stdout = subprocess.PIPE + stderr = subprocess.STDOUT if data is None: # using devnull assures any reads get null, rather # than possibly waiting on input. @@ -1953,10 +2003,11 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, devnull_fp.close() # Just ensure blank instead of none. - if not out and capture: - out = b'' - if not err and capture: - err = b'' + if capture or combine_capture: + if not out: + out = b'' + if not err: + err = b'' if decode: def ldecode(data, m='utf-8'): if not isinstance(data, bytes): diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index d774f3dc..2db8e3fa 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -829,6 +829,14 @@ class TestSubp(helpers.CiTestCase): r'Missing #! in script\?', util.subp, (noshebang,)) + def test_subp_combined_stderr_stdout(self): + """Providing combine_capture as True redirects stderr to stdout.""" + data = b'hello world' + (out, err) = util.subp(self.stdin2err, capture=True, + combine_capture=True, decode=False, data=data) + self.assertEqual(b'', err) + self.assertEqual(data, out) + def test_returns_none_if_no_capture(self): (out, err) = util.subp(self.stdin2out, data=b'', capture=False) self.assertIsNone(err) -- cgit v1.2.3 From c3f1ad9abd4a28c1b4c1f34db28ac72a646cdca6 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 09:23:08 -0600 Subject: netplan: fix mtu if provided by network config for all rendered types When network configuration for any interface defines maximum transmission values (MTU) the netplan, eni and sysconfig renders will take into account any device-level, or subnet-level mtu values. When network configuration has conflicting device-level and ipv4 subnet mtu values, the subnet-specific value is honored and a warning will be logged about any ignored device-level setting. LP: #1774666 --- cloudinit/net/eni.py | 20 +++++++++++++++++--- cloudinit/net/netplan.py | 22 ++++++++++++++-------- cloudinit/net/sysconfig.py | 7 +++++++ doc/rtd/topics/network-config-format-v1.rst | 27 +++++++++++++++++++++++++++ doc/rtd/topics/network-config-format-v2.rst | 6 ++++++ tests/unittests/test_net.py | 21 ++++++++++++++++++++- 6 files changed, 91 insertions(+), 12 deletions(-) (limited to 'tests') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index c6a71d16..bd20a361 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -10,9 +10,12 @@ from . import ParserError from . 
import renderer from .network_state import subnet_is_ipv6 +from cloudinit import log as logging from cloudinit import util +LOG = logging.getLogger(__name__) + NET_CONFIG_COMMANDS = [ "pre-up", "up", "post-up", "down", "pre-down", "post-down", ] @@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet): # TODO: switch to valid_map for attrs -def _iface_add_attrs(iface, index): +def _iface_add_attrs(iface, index, ipv4_subnet_mtu): # If the index is non-zero, this is an alias interface. Alias interfaces # represent additional interface addresses, and should not have additional # attributes. (extra attributes here are almost always either incorrect, @@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index): value = 'on' if iface[key] else 'off' if not value or key in ignore_map: continue + if key == 'mtu' and ipv4_subnet_mtu: + if value != ipv4_subnet_mtu: + LOG.warning( + "Network config: ignoring %s device-level mtu:%s because" + " ipv4 subnet-level mtu:%s provided.", + iface['name'], value, ipv4_subnet_mtu) + continue if key in multiline_keys: for v in value: content.append(" {0} {1}".format(renames.get(key, key), v)) @@ -377,12 +387,15 @@ class Renderer(renderer.Renderer): subnets = iface.get('subnets', {}) if subnets: for index, subnet in enumerate(subnets): + ipv4_subnet_mtu = None iface['index'] = index iface['mode'] = subnet['type'] iface['control'] = subnet.get('control', 'auto') subnet_inet = 'inet' if subnet_is_ipv6(subnet): subnet_inet += '6' + else: + ipv4_subnet_mtu = subnet.get('mtu') iface['inet'] = subnet_inet if subnet['type'].startswith('dhcp'): iface['mode'] = 'dhcp' @@ -397,7 +410,7 @@ class Renderer(renderer.Renderer): _iface_start_entry( iface, index, render_hwaddress=render_hwaddress) + _iface_add_subnet(iface, subnet) + - _iface_add_attrs(iface, index) + _iface_add_attrs(iface, index, ipv4_subnet_mtu) ) for route in subnet.get('routes', []): lines.extend(self._render_route(route, indent=" ")) @@ -409,7 +422,8 @@ class Renderer(renderer.Renderer): if 'bond-master' in iface or 'bond-slaves' in iface: lines.append("auto {name}".format(**iface)) lines.append("iface {name} {inet} {mode}".format(**iface)) - lines.extend(_iface_add_attrs(iface, index=0)) + lines.extend( + _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None)) sections.append(lines) return sections diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 63443484..40143634 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match): if key.startswith(match)) -def _extract_addresses(config, entry): +def _extract_addresses(config, entry, ifname): """This method parse a cloudinit.net.network_state dictionary (config) and maps netstate keys/values into a dictionary (entry) to represent netplan yaml. 
@@ -124,6 +124,15 @@ def _extract_addresses(config, entry): addresses.append(addr) + if 'mtu' in config: + entry_mtu = entry.get('mtu') + if entry_mtu and config['mtu'] != entry_mtu: + LOG.warning( + "Network config: ignoring %s device-level mtu:%s because" + " ipv4 subnet-level mtu:%s provided.", + ifname, config['mtu'], entry_mtu) + else: + entry['mtu'] = config['mtu'] if len(addresses) > 0: entry.update({'addresses': addresses}) if len(routes) > 0: @@ -262,10 +271,7 @@ class Renderer(renderer.Renderer): else: del eth['match'] del eth['set-name'] - if 'mtu' in ifcfg: - eth['mtu'] = ifcfg.get('mtu') - - _extract_addresses(ifcfg, eth) + _extract_addresses(ifcfg, eth, ifname) ethernets.update({ifname: eth}) elif if_type == 'bond': @@ -288,7 +294,7 @@ class Renderer(renderer.Renderer): slave_interfaces = ifcfg.get('bond-slaves') if slave_interfaces == 'none': _extract_bond_slaves_by_name(interfaces, bond, ifname) - _extract_addresses(ifcfg, bond) + _extract_addresses(ifcfg, bond, ifname) bonds.update({ifname: bond}) elif if_type == 'bridge': @@ -321,7 +327,7 @@ class Renderer(renderer.Renderer): if len(br_config) > 0: bridge.update({'parameters': br_config}) - _extract_addresses(ifcfg, bridge) + _extract_addresses(ifcfg, bridge, ifname) bridges.update({ifname: bridge}) elif if_type == 'vlan': @@ -333,7 +339,7 @@ class Renderer(renderer.Renderer): macaddr = ifcfg.get('mac_address', None) if macaddr is not None: vlan['macaddress'] = macaddr.lower() - _extract_addresses(ifcfg, vlan) + _extract_addresses(ifcfg, vlan, ifname) vlans.update({ifname: vlan}) # inject global nameserver values under each all interface which diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index e53b9f1b..3d719238 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -304,6 +304,13 @@ class Renderer(renderer.Renderer): mtu_key = 'IPV6_MTU' iface_cfg['IPV6INIT'] = True if 'mtu' in subnet: + mtu_mismatch = bool(mtu_key in iface_cfg and + subnet['mtu'] != iface_cfg[mtu_key]) + if mtu_mismatch: + LOG.warning( + 'Network config: ignoring %s device-level mtu:%s' + ' because ipv4 subnet-level mtu:%s provided.', + iface_cfg.name, iface_cfg[mtu_key], subnet['mtu']) iface_cfg[mtu_key] = subnet['mtu'] elif subnet_type == 'manual': # If the subnet has an MTU setting, then ONBOOT=True diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst index 2f8ab54c..3b0148ca 100644 --- a/doc/rtd/topics/network-config-format-v1.rst +++ b/doc/rtd/topics/network-config-format-v1.rst @@ -130,6 +130,18 @@ the bond interfaces. The ``bond_interfaces`` key accepts a list of network device ``name`` values from the configuration. This list may be empty. +**mtu**: ** + +The MTU key represents a device's Maximum Transmission Unit, the largest size +packet or frame, specified in octets (eight-bit bytes), that can be sent in a +packet- or frame-based network. Specifying ``mtu`` is optional. + +.. note:: + + The possible supported values of a device's MTU is not available at + configuration time. It's possible to specify a value too large or to + small for a device and may be ignored by the device. + **params**: ** The ``params`` key in a bond holds a dictionary of bonding parameters. @@ -268,6 +280,21 @@ Type ``vlan`` requires the following keys: - ``vlan_link``: Specify the underlying link via its ``name``. - ``vlan_id``: Specify the VLAN numeric id. 
+The following optional keys are supported: + +**mtu**: ** + +The MTU key represents a device's Maximum Transmission Unit, the largest size +packet or frame, specified in octets (eight-bit bytes), that can be sent in a +packet- or frame-based network. Specifying ``mtu`` is optional. + +.. note:: + + The possible supported values of a device's MTU is not available at + configuration time. It's possible to specify a value too large or to + small for a device and may be ignored by the device. + + **VLAN Example**:: network: diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index 335d236a..ea370ef5 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -174,6 +174,12 @@ recognized by ``inet_pton(3)`` Example for IPv4: ``gateway4: 172.16.0.1`` Example for IPv6: ``gateway6: 2001:4::1`` +**mtu**: ** + +The MTU key represents a device's Maximum Transmission Unit, the largest size +packet or frame, specified in octets (eight-bit bytes), that can be sent in a +packet- or frame-based network. Specifying ``mtu`` is optional. + **nameservers**: *<(mapping)>* Set DNS servers and search domains, for manual address configuration. There diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e13ca3ce..5ab61cf2 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -525,6 +525,7 @@ NETWORK_CONFIGS = { config: - type: 'physical' name: 'iface0' + mtu: 8999 subnets: - type: static address: 192.168.14.2/24 @@ -660,8 +661,8 @@ iface eth0.101 inet static dns-nameservers 192.168.0.10 10.23.23.134 dns-search barley.maas sacchromyces.maas brettanomyces.maas gateway 192.168.0.1 - hwaddress aa:bb:cc:dd:ee:11 mtu 1500 + hwaddress aa:bb:cc:dd:ee:11 vlan-raw-device eth0 vlan_id 101 @@ -757,6 +758,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true id: 101 link: eth0 macaddress: aa:bb:cc:dd:ee:11 + mtu: 1500 nameservers: addresses: - 192.168.0.10 @@ -920,6 +922,8 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true mtu: 1500 subnets: - type: static + # When 'mtu' matches device-level mtu, no warnings + mtu: 1500 address: 192.168.0.2/24 gateway: 192.168.0.1 dns_nameservers: @@ -1028,6 +1032,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - type: bond name: bond0 mac_address: "aa:bb:cc:dd:e8:ff" + mtu: 9000 bond_interfaces: - bond0s0 - bond0s1 @@ -1070,6 +1075,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true interfaces: - bond0s0 - bond0s1 + mtu: 9000 parameters: mii-monitor-interval: 100 mode: active-backup @@ -1157,6 +1163,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPADDR1=192.168.1.2 IPV6ADDR=2001:1::1/92 IPV6INIT=yes + MTU=9000 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 NM_CONTROLLED=no @@ -1203,6 +1210,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true name: en0 mac_address: "aa:bb:cc:dd:e8:00" - type: vlan + mtu: 2222 name: en0.99 vlan_link: en0 vlan_id: 99 @@ -1238,6 +1246,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPV6ADDR=2001:1::bbbb/96 IPV6INIT=yes IPV6_DEFAULTGW=2001:1::1 + MTU=2222 NETMASK=255.255.255.0 NETMASK1=255.255.255.0 NM_CONTROLLED=no @@ -1669,6 +1678,8 @@ iface eth1 inet dhcp class TestSysConfigRendering(CiTestCase): + with_logs = True + scripts_dir = '/etc/sysconfig/network-scripts' header = ('# 
Created by cloud-init on instance boot automatically, ' 'do not edit.\n#\n') @@ -1917,6 +1928,9 @@ USERCTL=no found = self._render_and_read(network_config=yaml.load(entry['yaml'])) self._compare_files_to_expected(entry['expected_sysconfig'], found) self._assert_headers(found) + self.assertNotIn( + 'WARNING: Network config: ignoring eth0.101 device-level mtu', + self.logs.getvalue()) def test_small_config(self): entry = NETWORK_CONFIGS['small'] @@ -1929,6 +1943,10 @@ USERCTL=no found = self._render_and_read(network_config=yaml.load(entry['yaml'])) self._compare_files_to_expected(entry['expected_sysconfig'], found) self._assert_headers(found) + expected_msg = ( + 'WARNING: Network config: ignoring iface0 device-level mtu:8999' + ' because ipv4 subnet-level mtu:9000 provided.') + self.assertIn(expected_msg, self.logs.getvalue()) def test_dhcpv6_only_config(self): entry = NETWORK_CONFIGS['dhcpv6_only'] @@ -2410,6 +2428,7 @@ class TestNetplanRoundTrip(CiTestCase): class TestEniRoundTrip(CiTestCase): + def _render_and_read(self, network_config=None, state=None, eni_path=None, netrules_path=None, dir=None): if dir is None: -- cgit v1.2.3 From 7b3c21615dac3b0d9163c9883309a2e7b675622a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 10:14:07 -0600 Subject: test: add optional --preserve-instance arg to integraiton tests By default, integration tests destroy the test instances after each test run. To aid debug and development of integration tests, support a --preserve-instance argument which will leave the modified test instance in a stopped state for further debug. --- doc/rtd/topics/tests.rst | 7 ++++++- tests/cloud_tests/args.py | 3 +++ tests/cloud_tests/collect.py | 3 ++- tests/cloud_tests/stage.py | 15 ++++++++++++--- 4 files changed, 23 insertions(+), 5 deletions(-) (limited to 'tests') diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst index cac4a6e4..b83bd899 100644 --- a/doc/rtd/topics/tests.rst +++ b/doc/rtd/topics/tests.rst @@ -58,7 +58,8 @@ explaining how to run one or the other independently. $ tox -e citest -- run --verbose \ --os-name stretch --os-name xenial \ --deb cloud-init_0.7.8~my_patch_all.deb \ - --preserve-data --data-dir ~/collection + --preserve-data --data-dir ~/collection \ + --preserve-instance The above command will do the following: @@ -76,6 +77,10 @@ The above command will do the following: * ``--preserve-data`` always preserve collected data, do not remove data after successful test run +* ``--preserve-instance`` do not destroy the instance after test to allow + for debugging the stopped instance during integration test development. By + default, test instances are destroyed after the test completes. 
+ * ``--data-dir ~/collection`` write collected data into `~/collection`, rather than using a temporary directory diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py index c6c1877b..ab345491 100644 --- a/tests/cloud_tests/args.py +++ b/tests/cloud_tests/args.py @@ -62,6 +62,9 @@ ARG_SETS = { (('-d', '--data-dir'), {'help': 'directory to store test data in', 'action': 'store', 'metavar': 'DIR', 'required': False}), + (('--preserve-instance',), + {'help': 'do not destroy the instance under test', + 'action': 'store_true', 'default': False, 'required': False}), (('--preserve-data',), {'help': 'do not remove collected data after successful run', 'action': 'store_true', 'default': False, 'required': False}),), diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index 78263bf5..75b50616 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -93,7 +93,8 @@ def collect_test_data(args, snapshot, os_name, test_name): # create test instance component = PlatformComponent( partial(platforms.get_instance, snapshot, user_data, - block=True, start=False, use_desc=test_name)) + block=True, start=False, use_desc=test_name), + preserve_instance=args.preserve_instance) LOG.info('collecting test data for test: %s', test_name) with component as instance: diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py index 74a7d46d..d64a1dcc 100644 --- a/tests/cloud_tests/stage.py +++ b/tests/cloud_tests/stage.py @@ -12,9 +12,15 @@ from tests.cloud_tests import LOG class PlatformComponent(object): """Context manager to safely handle platform components.""" - def __init__(self, get_func): - """Store get_ function as partial with no args.""" + def __init__(self, get_func, preserve_instance=False): + """Store get_ function as partial with no args. + + @param get_func: Callable returning an instance from the platform. + @param preserve_instance: Boolean, when True, do not destroy instance + after test. Used for test development. + """ self.get_func = get_func + self.preserve_instance = preserve_instance def __enter__(self): """Create instance of platform component.""" @@ -24,7 +30,10 @@ class PlatformComponent(object): def __exit__(self, etype, value, trace): """Destroy instance.""" if self.instance is not None: - self.instance.destroy() + if self.preserve_instance: + LOG.info('Preserving test instance %s', self.instance.name) + else: + self.instance.destroy() def run_single(name, call): -- cgit v1.2.3 From 5ffcb511db8783aa9d32895f7017a7278d546f2f Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 11:54:30 -0600 Subject: tests: skip chrony integration tests on lxd running artful or older A fix for chrony support per LP: #1589780 is not expected in Artful or older series. Skip the chrony suite of tests when running on a container and ubuntu series represented is <= artful as errors are expected. --- tests/cloud_tests/testcases/modules/ntp_chrony.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py index 461630a8..7d341773 100644 --- a/tests/cloud_tests/testcases/modules/ntp_chrony.py +++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py @@ -1,13 +1,24 @@ # This file is part of cloud-init. See LICENSE file for license information. 
"""cloud-init Integration Test Verify Script.""" +import unittest + from tests.cloud_tests.testcases import base class TestNtpChrony(base.CloudTestCase): """Test ntp module with chrony client""" - def test_chrony_entires(self): + def setUp(self): + """Skip this suite of tests on lxd and artful or older.""" + if self.platform == 'lxd': + if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0: + raise unittest.SkipTest( + 'No support for chrony on containers <= artful.' + ' LP: #1589780') + return super(TestNtpChrony, self).setUp() + + def test_chrony_entries(self): """Test chrony config entries""" out = self.get_data_file('chrony_conf') self.assertIn('.pool.ntp.org', out) -- cgit v1.2.3 From d0f6c4602f9cc412d372e10bd7411ff0214c1435 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 13:15:31 -0600 Subject: tests: provide human-readable integration test summary when --verbose Integration tests will now provide a brief summary for test failures listed by platform and distribution. The failure summary will only consist of failed test name and assert error message. Drop the verbose dictionary of all integration test output because this content is unreadable given the large number of integration test results listed within this dictionary. --- tests/cloud_tests/verify.py | 47 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py index 5a68a484..bfb27444 100644 --- a/tests/cloud_tests/verify.py +++ b/tests/cloud_tests/verify.py @@ -56,6 +56,51 @@ def verify_data(data_dir, platform, os_name, tests): return res +def format_test_failures(test_result): + """Return a human-readable printable format of test failures.""" + if not test_result['failures']: + return '' + failure_hdr = ' test failures:' + failure_fmt = ' * {module}.{class}.{function}\n {error}' + output = [] + for failure in test_result['failures']: + if not output: + output = [failure_hdr] + output.append(failure_fmt.format(**failure)) + return '\n'.join(output) + + +def format_results(res): + """Return human-readable results as a string""" + platform_hdr = 'Platform: {platform}' + distro_hdr = ' Distro: {distro}' + distro_summary_fmt = ( + ' test modules passed:{passed} tests failed:{failed}') + output = [''] + counts = {} + for platform, platform_data in res.items(): + output.append(platform_hdr.format(platform=platform)) + counts[platform] = {} + for distro, distro_data in platform_data.items(): + distro_failure_output = [] + output.append(distro_hdr.format(distro=distro)) + counts[platform][distro] = {'passed': 0, 'failed': 0} + for _, test_result in distro_data.items(): + if test_result['passed']: + counts[platform][distro]['passed'] += 1 + else: + counts[platform][distro]['failed'] += len( + test_result['failures']) + failure_output = format_test_failures(test_result) + if failure_output: + distro_failure_output.append(failure_output) + output.append( + distro_summary_fmt.format(**counts[platform][distro])) + if distro_failure_output: + output.extend(distro_failure_output) + return '\n'.join(output) + + def verify(args): """Verify test data. 
@@ -90,7 +135,7 @@ def verify(args): failed += len(fail_list) # dump results - LOG.debug('verify results: %s', res) + LOG.debug('\n---- Verify summarized results:\n%s', format_results(res)) if args.result: util.merge_results({'verify': res}, args.result) -- cgit v1.2.3 From 27283c31f4bf85f40588cfa3b31389d70ec00243 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 16:42:54 -0600 Subject: tests: fix salt_minion integration test on bionic and later In ubuntu, the salt-minion package version 2017.7.4+dfsg1-1 or later automatically moves any seed keys from /etc/salt/pki/minion/ to /var/lib/salt/pki/minion/. Fix integration tests to collect either files in either /etc/salt/pki/minion/ or /var/lib/salt/pki/minion/. --- tests/cloud_tests/testcases/modules/salt_minion.yaml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'tests') diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml index c24aa177..9227147c 100644 --- a/tests/cloud_tests/testcases/modules/salt_minion.yaml +++ b/tests/cloud_tests/testcases/modules/salt_minion.yaml @@ -28,10 +28,20 @@ collect_scripts: cat /etc/salt/minion_id minion.pem: | #!/bin/bash - cat /etc/salt/pki/minion/minion.pem + PRIV_KEYFILE=/etc/salt/pki/minion/minion.pem + if [ ! -f $PRIV_KEYFILE ]; then + # Bionic and later automatically moves /etc/salt/pki/minion/* + PRIV_KEYFILE=/var/lib/salt/pki/minion/minion.pem + fi + cat $PRIV_KEYFILE minion.pub: | #!/bin/bash - cat /etc/salt/pki/minion/minion.pub + PUB_KEYFILE=/etc/salt/pki/minion/minion.pub + if [ ! -f $PUB_KEYFILE ]; then + # Bionic and later automatically moves /etc/salt/pki/minion/* + PUB_KEYFILE=/var/lib/salt/pki/minion/minion.pub + fi + cat $PUB_KEYFILE grains: | #!/bin/bash cat /etc/salt/grains -- cgit v1.2.3 From 171378613ef4de3c1bb4170a10ec1748ac62f39f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 13 Jun 2018 12:42:43 -0400 Subject: Fix get_proc_env for pids that have non-utf8 content in environment. There is no requirement that the environment of a process contains only utf-8 data. This modifies get_proc_env to support it reading data as binary and decoding if provided with an encoding. The default case is now that we now do: contents.decode('utf-8', 'replace') rather than contents.decode('utf-8', 'strict') LP: #1775371 --- cloudinit/util.py | 35 +++++++++++++++++---------- tests/unittests/test_util.py | 56 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 13 deletions(-) (limited to 'tests') diff --git a/cloudinit/util.py b/cloudinit/util.py index 0017de72..26a41122 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2131,24 +2131,33 @@ def is_container(): return False -def get_proc_env(pid): +def get_proc_env(pid, encoding='utf-8', errors='replace'): """ Return the environment in a dict that a given process id was started with. - """ - env = {} - fn = os.path.join("/proc/", str(pid), "environ") + @param encoding: if true, then decoding will be done with + .decode(encoding, errors) and text will be returned. + if false then binary will be returned. 
+ @param errors: only used if encoding is true.""" + fn = os.path.join("/proc", str(pid), "environ") + try: - contents = load_file(fn) - toks = contents.split("\x00") - for tok in toks: - if tok == "": - continue - (name, val) = tok.split("=", 1) - if name: - env[name] = val + contents = load_file(fn, decode=False) except (IOError, OSError): - pass + return {} + + env = {} + null, equal = (b"\x00", b"=") + if encoding: + null, equal = ("\x00", "=") + contents = contents.decode(encoding, errors) + + for tok in contents.split(null): + if not tok: + continue + (name, val) = tok.split(equal, 1) + if name: + env[name] = val return env diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 2db8e3fa..20479f66 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1089,4 +1089,60 @@ class TestLoadShellContent(helpers.TestCase): '']))) +class TestGetProcEnv(helpers.TestCase): + """test get_proc_env.""" + null = b'\x00' + simple1 = b'HOME=/' + simple2 = b'PATH=/bin:/sbin' + bootflag = b'BOOTABLE_FLAG=\x80' # from LP: #1775371 + mixed = b'MIXED=' + b'ab\xccde' + + def _val_decoded(self, blob, encoding='utf-8', errors='replace'): + # return the value portion of key=val decoded. + return blob.split(b'=', 1)[1].decode(encoding, errors) + + @mock.patch("cloudinit.util.load_file") + def test_non_utf8_in_environment(self, m_load_file): + """env may have non utf-8 decodable content.""" + content = self.null.join( + (self.bootflag, self.simple1, self.simple2, self.mixed)) + m_load_file.return_value = content + + self.assertEqual( + {'BOOTABLE_FLAG': self._val_decoded(self.bootflag), + 'HOME': '/', 'PATH': '/bin:/sbin', + 'MIXED': self._val_decoded(self.mixed)}, + util.get_proc_env(1)) + self.assertEqual(1, m_load_file.call_count) + + @mock.patch("cloudinit.util.load_file") + def test_encoding_none_returns_bytes(self, m_load_file): + """encoding none returns bytes.""" + lines = (self.bootflag, self.simple1, self.simple2, self.mixed) + content = self.null.join(lines) + m_load_file.return_value = content + + self.assertEqual( + dict([t.split(b'=') for t in lines]), + util.get_proc_env(1, encoding=None)) + self.assertEqual(1, m_load_file.call_count) + + @mock.patch("cloudinit.util.load_file") + def test_all_utf8_encoded(self, m_load_file): + """common path where only utf-8 decodable content.""" + content = self.null.join((self.simple1, self.simple2)) + m_load_file.return_value = content + self.assertEqual( + {'HOME': '/', 'PATH': '/bin:/sbin'}, + util.get_proc_env(1)) + self.assertEqual(1, m_load_file.call_count) + + @mock.patch("cloudinit.util.load_file") + def test_non_existing_file_returns_empty_dict(self, m_load_file): + """as implemented, a non-existing pid returns empty dict. + This is how it was originally implemented.""" + m_load_file.side_effect = OSError("File does not exist.") + self.assertEqual({}, util.get_proc_env(1)) + self.assertEqual(1, m_load_file.call_count) + # vi: ts=4 expandtab -- cgit v1.2.3 From faa6f07e9de4058083a5f69ed508b6e24bd53b23 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 13 Jun 2018 12:54:43 -0400 Subject: Be more safe on string/bytes when writing multipart user-data to disk. When creating the multipart mime message that is written as user-data.txt.i, cloud-init losing data on conversion to some things as a string. 
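The change below makes convert_string() treat its input as bytes end to end; a minimal illustration of the intended round-trip, mirroring the unit test added in this commit (no new helpers are assumed):

    from cloudinit import user_data as ud

    # printable content that is not plain ASCII (a utf-8 encoded umlaut)
    blob = b'#!/bin/bash\necho \xc3\x84\n'
    msg = ud.convert_string(blob)
    # the payload comes back as the original bytes, nothing lost in conversion
    assert msg.get_payload(decode=True) == blob
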
LP: #1768600 Author: Scott Moser Co-Authored-By: Chad Smith --- cloudinit/user_data.py | 22 +++++++++++++--------- tests/unittests/test_data.py | 11 ++++++++++- 2 files changed, 23 insertions(+), 10 deletions(-) (limited to 'tests') diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index 8f6aba1e..ed83d2d8 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -337,8 +337,10 @@ def is_skippable(part): # Coverts a raw string into a mime message def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE): + """convert a string (more likely bytes) or a message into + a mime message.""" if not raw_data: - raw_data = '' + raw_data = b'' def create_binmsg(data, content_type): maintype, subtype = content_type.split("/", 1) @@ -346,15 +348,17 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE): msg.set_payload(data) return msg - try: - data = util.decode_binary(util.decomp_gzip(raw_data)) - if "mime-version:" in data[0:4096].lower(): - msg = util.message_from_string(data) - else: - msg = create_binmsg(data, content_type) - except UnicodeDecodeError: - msg = create_binmsg(raw_data, content_type) + if isinstance(raw_data, six.text_type): + bdata = raw_data.encode('utf-8') + else: + bdata = raw_data + bdata = util.decomp_gzip(bdata, decode=False) + if b"mime-version:" in bdata[0:4096].lower(): + msg = util.message_from_string(bdata.decode('utf-8')) + else: + msg = create_binmsg(bdata, content_type) return msg + # vi: ts=4 expandtab diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 91d35cb8..3efe7adf 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -606,8 +606,10 @@ class TestUDProcess(helpers.ResourceUsingTestCase): class TestConvertString(helpers.TestCase): + def test_handles_binary_non_utf8_decodable(self): - blob = b'\x32\x99' + """Printable unicode (not utf8-decodable) is safely converted.""" + blob = b'#!/bin/bash\necho \xc3\x84\n' msg = ud.convert_string(blob) self.assertEqual(blob, msg.get_payload(decode=True)) @@ -621,6 +623,13 @@ class TestConvertString(helpers.TestCase): msg = ud.convert_string(text) self.assertEqual(text, msg.get_payload(decode=False)) + def test_handle_mime_parts(self): + """Mime parts are properly returned as a mime message.""" + message = MIMEBase("text", "plain") + message.set_payload("Just text") + msg = ud.convert_string(str(message)) + self.assertEqual("Just text", msg.get_payload(decode=False)) + class TestFetchBaseConfig(helpers.TestCase): def test_only_builtin_gets_builtin(self): -- cgit v1.2.3 From fef2616b9876d3d354b0de1a8e753361e52e77b0 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Fri, 15 Jun 2018 13:41:21 -0600 Subject: stages: fix tracebacks if a module stage is undefined or empty In /etc/cloud/cloud.cfg, users and imagees can configure which modules run during a specific cloud-init stage by modifying one of the following lists: cloud_init_modules, cloud_init_modules, cloud_init_final_modules. 
If any of the configured module lists are absent or empty, cloud-init will emit the same message it already does for existing lists that only contain modules which are not unsupported on that platform: No 'config' modules to run under section 'cloud_config_modules' LP: #1770462 --- cloudinit/stages.py | 4 +++- tests/unittests/test_runs/test_simple_run.py | 32 ++++++++++++++++++++++++++-- 2 files changed, 33 insertions(+), 3 deletions(-) (limited to 'tests') diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 3998cf68..286607bf 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -697,7 +697,9 @@ class Modules(object): module_list = [] if name not in self.cfg: return module_list - cfg_mods = self.cfg[name] + cfg_mods = self.cfg.get(name) + if not cfg_mods: + return module_list # Create 'module_list', an array of hashes # Where hash['mod'] = module name # hash['freq'] = frequency diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 762974e9..d67c422c 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -1,5 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. +import copy import os @@ -127,8 +128,9 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): """run_section forced skipped modules by using unverified_modules.""" # re-write cloud.cfg with unverified_modules override - self.cfg['unverified_modules'] = ['spacewalk'] # Would have skipped - cloud_cfg = util.yaml_dumps(self.cfg) + cfg = copy.deepcopy(self.cfg) + cfg['unverified_modules'] = ['spacewalk'] # Would have skipped + cloud_cfg = util.yaml_dumps(cfg) util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) util.write_file(os.path.join(self.new_root, 'etc', 'cloud', 'cloud.cfg'), cloud_cfg) @@ -150,4 +152,30 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): "running unverified_modules: 'spacewalk'", self.logs.getvalue()) + def test_none_ds_run_with_no_config_modules(self): + """run_section will report no modules run when none are configured.""" + + # re-write cloud.cfg with unverified_modules override + cfg = copy.deepcopy(self.cfg) + # Represent empty configuration in /etc/cloud/cloud.cfg + cfg['cloud_init_modules'] = None + cloud_cfg = util.yaml_dumps(cfg) + util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) + util.write_file(os.path.join(self.new_root, 'etc', + 'cloud', 'cloud.cfg'), cloud_cfg) + + initer = stages.Init() + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.instancify() + initer.update() + initer.cloudify().run('consume_data', initer.consume_data, + args=[PER_INSTANCE], freq=PER_INSTANCE) + + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) + self.assertEqual([], which_ran) + # vi: ts=4 expandtab -- cgit v1.2.3 From 1efa8a0a030794cec68197100f31a856d0d264ab Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 15 Jun 2018 19:33:30 -0600 Subject: openstack: avoid unneeded metadata probe on non-openstack platforms OpenStack datasource is now discovered in init-local stage. In order to probe whether OpenStack metadata is present, it performs a costly sandboxed dhclient setup and metadata probe against http://169.254.169.254 for openstack data. Cloud-init properly detects non-OpenStack on EC2, but it spends precious time probing the metadata service also resulting in a confusing WARNING log about 'metadata not present'. 
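The remedy, described in the next paragraph and implemented in the diff below, is to gate the expensive probe on a cheap platform check; a simplified sketch mirroring the detect_openstack() helper this commit adds (not a drop-in copy):

    from cloudinit import util

    def looks_like_openstack():
        if not util.is_x86():
            return True  # DMI data is unreliable on non-x86, so stay permissive
        if util.read_dmi_data('system-product-name') in (
                'OpenStack Nova', 'OpenStack Compute'):
            return True
        if util.read_dmi_data('chassis-asset-tag') == 'OpenTelekomCloud':
            return True
        # nova-lxd exposes the product name in PID 1's environment
        return util.get_proc_env(1).get('product_name') == 'OpenStack Nova'
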
To avoid the wasted cycles, and confusing warning, get_data will call a detect_openstack function to quickly determine whether the platform looks like OpenStack before trying to setup network to probe and crawl the metadata service. LP: #1776701 --- cloudinit/sources/DataSourceOpenStack.py | 23 ++++ cloudinit/util.py | 13 ++- doc/rtd/topics/datasources/openstack.rst | 15 +++ tests/unittests/test_datasource/test_openstack.py | 124 +++++++++++++++++++--- tests/unittests/test_util.py | 23 ++++ 5 files changed, 182 insertions(+), 16 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 1a12a3f1..365af96a 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -23,6 +23,13 @@ DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } +# OpenStack DMI constants +DMI_PRODUCT_NOVA = 'OpenStack Nova' +DMI_PRODUCT_COMPUTE = 'OpenStack Compute' +VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE] +DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud' +VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM] + class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): @@ -114,6 +121,8 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): False when unable to contact metadata service or when metadata format is invalid or disabled. """ + if not detect_openstack(): + return False if self.perform_dhcp_setup: # Setup networking in init-local stage. try: with EphemeralDHCPv4(self.fallback_interface): @@ -205,6 +214,20 @@ def read_metadata_service(base_url, ssl_details=None, return reader.read_v2() +def detect_openstack(): + """Return True when a potential OpenStack platform is detected.""" + if not util.is_x86(): + return True # Non-Intel cpus don't properly report dmi product names + product_name = util.read_dmi_data('system-product-name') + if product_name in VALID_DMI_PRODUCT_NAMES: + return True + elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS: + return True + elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA: + return True + return False + + # Used to match classes to dependencies datasources = [ (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)), diff --git a/cloudinit/util.py b/cloudinit/util.py index 26a41122..6da95113 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2629,6 +2629,16 @@ def _call_dmidecode(key, dmidecode_path): return None +def is_x86(uname_arch=None): + """Return True if platform is x86-based""" + if uname_arch is None: + uname_arch = os.uname()[4] + x86_arch_match = ( + uname_arch == 'x86_64' or + (uname_arch[0] == 'i' and uname_arch[2:] == '86')) + return x86_arch_match + + def read_dmi_data(key): """ Wrapper for reading DMI data. @@ -2656,8 +2666,7 @@ def read_dmi_data(key): # running dmidecode can be problematic on some arches (LP: #1243287) uname_arch = os.uname()[4] - if not (uname_arch == "x86_64" or - (uname_arch.startswith("i") and uname_arch[2:] == "86") or + if not (is_x86(uname_arch) or uname_arch == 'aarch64' or uname_arch == 'amd64'): LOG.debug("dmidata is not supported on %s", uname_arch) diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst index 0ea89943..421da08f 100644 --- a/doc/rtd/topics/datasources/openstack.rst +++ b/doc/rtd/topics/datasources/openstack.rst @@ -7,6 +7,21 @@ This datasource supports reading data from the `OpenStack Metadata Service `_. 
+Discovery +------------- +To determine whether a platform looks like it may be OpenStack, cloud-init +checks the following environment attributes as a potential OpenStack platform: + + * Maybe OpenStack if + + * **non-x86 cpu architecture**: because DMI data is buggy on some arches + * Is OpenStack **if x86 architecture and ANY** of the following + + * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova* + * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute* + * **DMI chassis_asset_tag** is *OpenTelekomCloud* + + Configuration ------------- The following configuration can be set for the datasource in system diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index fad73b21..585acc33 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -69,6 +69,8 @@ EC2_VERSIONS = [ 'latest', ] +MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.' + # TODO _register_uris should leverage test_ec2.register_mock_metaserver. def _register_uris(version, ec2_files, ec2_meta, os_files): @@ -231,7 +233,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): ds_os = ds.DataSourceOpenStack( settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) - found = ds_os.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() self.assertTrue(found) self.assertEqual(2, ds_os.version) md = dict(ds_os.metadata) @@ -260,7 +265,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'broadcast-address': '192.168.2.255'}] self.assertIsNone(ds_os_local.version) - found = ds_os_local.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os_local.get_data() self.assertTrue(found) self.assertEqual(2, ds_os_local.version) md = dict(ds_os_local.metadata) @@ -284,7 +292,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): None, helpers.Paths({'run_dir': self.tmp})) self.assertIsNone(ds_os.version) - found = ds_os.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) self.assertIn( @@ -306,15 +317,16 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'timeout': 0, } self.assertIsNone(ds_os.version) - found = ds_os.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) def test_network_config_disabled_by_datasource_config(self): """The network_config can be disabled from datasource config.""" - mock_path = ( - 'cloudinit.sources.DataSourceOpenStack.openstack.' 
- 'convert_net_json') + mock_path = MOCK_PATH + 'openstack.convert_net_json' ds_os = ds.DataSourceOpenStack( settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) ds_os.ds_cfg = {'apply_network_config': False} @@ -327,9 +339,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def test_network_config_from_network_json(self): """The datasource gets network_config from network_data.json.""" - mock_path = ( - 'cloudinit.sources.DataSourceOpenStack.openstack.' - 'convert_net_json') + mock_path = MOCK_PATH + 'openstack.convert_net_json' example_cfg = {'version': 1, 'config': []} ds_os = ds.DataSourceOpenStack( settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) @@ -345,9 +355,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): def test_network_config_cached(self): """The datasource caches the network_config property.""" - mock_path = ( - 'cloudinit.sources.DataSourceOpenStack.openstack.' - 'convert_net_json') + mock_path = MOCK_PATH + 'openstack.convert_net_json' example_cfg = {'version': 1, 'config': []} ds_os = ds.DataSourceOpenStack( settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) @@ -374,7 +382,10 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): 'timeout': 0, } self.assertIsNone(ds_os.version) - found = ds_os.get_data() + mock_path = MOCK_PATH + 'detect_openstack' + with test_helpers.mock.patch(mock_path) as m_detect_os: + m_detect_os.return_value = True + found = ds_os.get_data() self.assertFalse(found) self.assertIsNone(ds_os.version) @@ -438,4 +449,89 @@ class TestVendorDataLoading(test_helpers.TestCase): data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']} self.assertEqual(self.cvj(data), data['cloud-init']) + +@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86') +class TestDetectOpenStack(test_helpers.CiTestCase): + + def test_detect_openstack_non_intel_x86(self, m_is_x86): + """Return True on non-intel platforms because dmi isn't conclusive.""" + m_is_x86.return_value = False + self.assertTrue( + ds.detect_openstack(), 'Expected detect_openstack == True') + + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env, + m_is_x86): + """Return False on EC2 platforms.""" + m_is_x86.return_value = True + # No product_name in proc/1/environ + m_proc_env.return_value = {'HOME': '/'} + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' on EC2 + if dmi_key == 'chassis-asset-tag': + return '' # Empty string on EC2 + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertFalse( + ds.detect_openstack(), 'Expected detect_openstack == False on EC2') + m_proc_env.assert_called_with(1) + + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_detect_openstack_intel_product_name_compute(self, m_dmi, + m_is_x86): + """Return True on OpenStack compute and nova instances.""" + m_is_x86.return_value = True + openstack_product_names = ['OpenStack Nova', 'OpenStack Compute'] + + for product_name in openstack_product_names: + m_dmi.return_value = product_name + self.assertTrue( + ds.detect_openstack(), 'Failed to detect_openstack') + + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi, + m_is_x86): + """Return True on OpenStack reporting OpenTelekomCloud asset-tag.""" + 
m_is_x86.return_value = True + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud + if dmi_key == 'chassis-asset-tag': + return 'OpenTelekomCloud' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on OpenTelekomCloud') + + @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') + @test_helpers.mock.patch(MOCK_PATH + 'util.read_dmi_data') + def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env, + m_is_x86): + """Return True when nova product_name specified in /proc/1/environ.""" + m_is_x86.return_value = True + # Nova product_name in proc/1/environ + m_proc_env.return_value = { + 'HOME': '/', 'product_name': 'OpenStack Nova'} + + def fake_dmi_read(dmi_key): + if dmi_key == 'system-product-name': + return 'HVM domU' # Nothing 'openstackish' + if dmi_key == 'chassis-asset-tag': + return '' # Nothin 'openstackish' + assert False, 'Unexpected dmi read of %s' % dmi_key + + m_dmi.side_effect = fake_dmi_read + self.assertTrue( + ds.detect_openstack(), + 'Expected detect_openstack == True on OpenTelekomCloud') + m_proc_env.assert_called_with(1) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 20479f66..7a203ce2 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -468,6 +468,29 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): self.assertIsNone(ret) +class TestIsX86(helpers.CiTestCase): + + def test_is_x86_matches_x86_types(self): + """is_x86 returns True if CPU architecture matches.""" + matched_arches = ['x86_64', 'i386', 'i586', 'i686'] + for arch in matched_arches: + self.assertTrue( + util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch) + + def test_is_x86_unmatched_types(self): + """is_x86 returns Fale on non-intel x86 architectures.""" + unmatched_arches = ['ia64', '9000/800', 'arm64v71'] + for arch in unmatched_arches: + self.assertFalse( + util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch) + + @mock.patch('cloudinit.util.os.uname') + def test_is_x86_calls_uname_for_architecture(self, m_uname): + """is_x86 returns True if platform from uname matches.""" + m_uname.return_value = [0, 1, 2, 3, 'x86_64'] + self.assertTrue(util.is_x86()) + + class TestReadDMIData(helpers.FilesystemMockingTestCase): def setUp(self): -- cgit v1.2.3 From 4ce6720104ec92d8d7c5aa993bf7ec405a2f53db Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 15 Jun 2018 20:00:15 -0600 Subject: lxd: Delete default network and detach device if lxd-init created them. Newer versions (3.0.1+) of lxd create the 'lxdbr0' network when 'lxd init --auto' is invoked. When cloud-init is given a network configuration to pass on to lxc and that config had no name specified or 'lxdbr0', then cloud-init would fail to create the network as it already exists. Similarly, we need to remove the device from the default profile so that the attach code can work. Also, add a _lxc method and use it to make sure we're getting the --force-local flag everywhere. 
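In practice the cleanup is a best-effort deletion of the stock bridge and removal of its NIC from the default profile, tolerating "already gone" failures; a simplified sketch of the maybe_cleanup_default()/_lxc() pair added below (names and flow taken from this commit):

    from cloudinit import util

    for cmd in (["network", "delete", "lxdbr0"],
                ["profile", "device", "remove", "default", "eth0"]):
        try:
            # _lxc() appends --force-local to every lxc invocation
            util.subp(["lxc"] + cmd + ["--force-local"],
                      update_env={'LC_ALL': 'C'})
        except util.ProcessExecutionError as e:
            if e.exit_code != 1:  # exit code 1 means it was already absent
                raise
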
LP: #1776958 --- cloudinit/config/cc_lxd.py | 64 ++++++++++++++++--- tests/unittests/test_handler/test_handler_lxd.py | 80 +++++++++++++++++++----- 2 files changed, 120 insertions(+), 24 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 09374d2e..ac72ac4a 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly. domain: """ +from cloudinit import log as logging from cloudinit import util import os distros = ['ubuntu'] +LOG = logging.getLogger(__name__) + +_DEFAULT_NETWORK_NAME = "lxdbr0" + def handle(name, cfg, cloud, log, args): # Get config @@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args): # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" if bridge_cfg: + net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) if os.path.exists("/etc/default/lxd-bridge") \ and util.which(dconf_comm): # Bridge configured through packaging @@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args): else: # Built-in LXD bridge support cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) + maybe_cleanup_default( + net_name=net_name, did_init=bool(init_cfg), + create=bool(cmd_create), attach=bool(cmd_attach)) if cmd_create: log.debug("Creating lxd bridge: %s" % " ".join(cmd_create)) - util.subp(cmd_create) + _lxc(cmd_create) if cmd_attach: log.debug("Setting up default lxd bridge: %s" % " ".join(cmd_create)) - util.subp(cmd_attach) + _lxc(cmd_attach) elif bridge_cfg: raise RuntimeError( @@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg): if bridge_cfg.get("mode") == "none": return None, None - bridge_name = bridge_cfg.get("name", "lxdbr0") + bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) cmd_create = [] - cmd_attach = ["lxc", "network", "attach-profile", bridge_name, - "default", "eth0", "--force-local"] + cmd_attach = ["network", "attach-profile", bridge_name, + "default", "eth0"] if bridge_cfg.get("mode") == "existing": return None, cmd_attach @@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg): if bridge_cfg.get("mode") != "new": raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) - cmd_create = ["lxc", "network", "create", bridge_name] + cmd_create = ["network", "create", bridge_name] if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): cmd_create.append("ipv4.address=%s/%s" % @@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg): if bridge_cfg.get("domain"): cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) - cmd_create.append("--force-local") - return cmd_create, cmd_attach + +def _lxc(cmd): + env = {'LC_ALL': 'C'} + util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) + + +def maybe_cleanup_default(net_name, did_init, create, attach, + profile="default", nic_name="eth0"): + """Newer versions of lxc (3.0.1+) create a lxdbr0 network when + 'lxd init --auto' is run. Older versions did not. + + By removing ay that lxd-init created, we simply leave the add/attach + code in-tact. + + https://github.com/lxc/lxd/issues/4649""" + if net_name != _DEFAULT_NETWORK_NAME or not did_init: + return + + fail_assume_enoent = " failed. Assuming it did not exist." + succeeded = " succeeded." 
+ if create: + msg = "Deletion of lxd network '%s'" % net_name + try: + _lxc(["network", "delete", net_name]) + LOG.debug(msg + succeeded) + except util.ProcessExecutionError as e: + if e.exit_code != 1: + raise e + LOG.debug(msg + fail_assume_enoent) + + if attach: + msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile) + try: + _lxc(["profile", "device", "remove", profile, nic_name]) + LOG.debug(msg + succeeded) + except util.ProcessExecutionError as e: + if e.exit_code != 1: + raise e + LOG.debug(msg + fail_assume_enoent) + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index a2054980..4dd7e09f 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -33,12 +33,16 @@ class TestLxd(t_help.CiTestCase): cc = cloud.Cloud(ds, paths, {}, d, None) return cc + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") - def test_lxd_init(self, mock_util): + def test_lxd_init(self, mock_util, m_maybe_clean): cc = self._get_cloud('ubuntu') mock_util.which.return_value = True + m_maybe_clean.return_value = None cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) self.assertTrue(mock_util.which.called) + # no bridge config, so maybe_cleanup should not be called. + self.assertFalse(m_maybe_clean.called) init_call = mock_util.subp.call_args_list[0][0][0] self.assertEqual(init_call, ['lxd', 'init', '--auto', @@ -46,32 +50,39 @@ class TestLxd(t_help.CiTestCase): '--storage-backend=zfs', '--storage-pool=poolname']) + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") - def test_lxd_install(self, mock_util): + def test_lxd_install(self, mock_util, m_maybe_clean): cc = self._get_cloud('ubuntu') cc.distro = mock.MagicMock() mock_util.which.return_value = None cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) self.assertNotIn('WARN', self.logs.getvalue()) self.assertTrue(cc.distro.install_packages.called) + cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) + self.assertFalse(m_maybe_clean.called) install_pkg = cc.distro.install_packages.call_args_list[0][0][0] self.assertEqual(sorted(install_pkg), ['lxd', 'zfs']) + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") - def test_no_init_does_nothing(self, mock_util): + def test_no_init_does_nothing(self, mock_util, m_maybe_clean): cc = self._get_cloud('ubuntu') cc.distro = mock.MagicMock() cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_util.subp.called) + self.assertFalse(m_maybe_clean.called) + @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") - def test_no_lxd_does_nothing(self, mock_util): + def test_no_lxd_does_nothing(self, mock_util, m_maybe_clean): cc = self._get_cloud('ubuntu') cc.distro = mock.MagicMock() cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) self.assertFalse(mock_util.subp.called) + self.assertFalse(m_maybe_clean.called) def test_lxd_debconf_new_full(self): data = {"mode": "new", @@ -147,14 +158,13 @@ class TestLxd(t_help.CiTestCase): "domain": "lxd"} self.assertEqual( cc_lxd.bridge_to_cmd(data), - (["lxc", "network", "create", "testbr0", + (["network", "create", "testbr0", 
"ipv4.address=10.0.8.1/24", "ipv4.nat=true", "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254", "ipv6.address=fd98:9e0:3744::1/64", - "ipv6.nat=true", "dns.domain=lxd", - "--force-local"], - ["lxc", "network", "attach-profile", - "testbr0", "default", "eth0", "--force-local"])) + "ipv6.nat=true", "dns.domain=lxd"], + ["network", "attach-profile", + "testbr0", "default", "eth0"])) def test_lxd_cmd_new_partial(self): data = {"mode": "new", @@ -163,19 +173,18 @@ class TestLxd(t_help.CiTestCase): "ipv6_nat": "true"} self.assertEqual( cc_lxd.bridge_to_cmd(data), - (["lxc", "network", "create", "lxdbr0", "ipv4.address=none", - "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true", - "--force-local"], - ["lxc", "network", "attach-profile", - "lxdbr0", "default", "eth0", "--force-local"])) + (["network", "create", "lxdbr0", "ipv4.address=none", + "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"], + ["network", "attach-profile", + "lxdbr0", "default", "eth0"])) def test_lxd_cmd_existing(self): data = {"mode": "existing", "name": "testbr0"} self.assertEqual( cc_lxd.bridge_to_cmd(data), - (None, ["lxc", "network", "attach-profile", - "testbr0", "default", "eth0", "--force-local"])) + (None, ["network", "attach-profile", + "testbr0", "default", "eth0"])) def test_lxd_cmd_none(self): data = {"mode": "none"} @@ -183,4 +192,43 @@ class TestLxd(t_help.CiTestCase): cc_lxd.bridge_to_cmd(data), (None, None)) + +class TestLxdMaybeCleanupDefault(t_help.CiTestCase): + """Test the implementation of maybe_cleanup_default.""" + + defnet = cc_lxd._DEFAULT_NETWORK_NAME + + @mock.patch("cloudinit.config.cc_lxd._lxc") + def test_network_other_than_default_not_deleted(self, m_lxc): + """deletion or removal should only occur if bridge is default.""" + cc_lxd.maybe_cleanup_default( + net_name="lxdbr1", did_init=True, create=True, attach=True) + m_lxc.assert_not_called() + + @mock.patch("cloudinit.config.cc_lxd._lxc") + def test_did_init_false_does_not_delete(self, m_lxc): + """deletion or removal should only occur if did_init is True.""" + cc_lxd.maybe_cleanup_default( + net_name=self.defnet, did_init=False, create=True, attach=True) + m_lxc.assert_not_called() + + @mock.patch("cloudinit.config.cc_lxd._lxc") + def test_network_deleted_if_create_true(self, m_lxc): + """deletion of network should occur if create is True.""" + cc_lxd.maybe_cleanup_default( + net_name=self.defnet, did_init=True, create=True, attach=False) + m_lxc.assert_called_once_with(["network", "delete", self.defnet]) + + @mock.patch("cloudinit.config.cc_lxd._lxc") + def test_device_removed_if_attach_true(self, m_lxc): + """deletion of network should occur if create is True.""" + nic_name = "my_nic" + profile = "my_profile" + cc_lxd.maybe_cleanup_default( + net_name=self.defnet, did_init=True, create=False, attach=True, + profile=profile, nic_name=nic_name) + m_lxc.assert_called_once_with( + ["profile", "device", "remove", profile, nic_name]) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 4d69fb44a5607e16843537be26758893f2dd79be Mon Sep 17 00:00:00 2001 From: Jacob Bednarz Date: Tue, 19 Jun 2018 16:04:17 -0600 Subject: Explicitly prevent `sudo` access for user module To deny a user elevated access, you can omit the `sudo` key from the `users` dictionary. This works fine however it's implicitly defined based on defaults of `cloud-init`. If the project moves to have `sudo` access allowed for all by default (quite unlikely but still possible) this will catch a few people out. 
This introduces the ability to define an explicit `sudo: False` in the `users` dictionary and it will prevent `sudo` access. The behaviour is identical to omitting the key. LP: #1771468 --- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/freebsd.py | 2 +- doc/examples/cloud-config-user-groups.txt | 27 +++++++++++++++++------ tests/unittests/test_distros/test_create_users.py | 8 +++++++ 4 files changed, 30 insertions(+), 9 deletions(-) (limited to 'tests') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 6c22b07f..ab0b0776 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -531,7 +531,7 @@ class Distro(object): self.lock_passwd(name) # Configure sudo access - if 'sudo' in kwargs: + if 'sudo' in kwargs and kwargs['sudo'] is not False: self.write_sudo_rules(name, kwargs['sudo']) # Import SSH keys diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 5b1718a4..ff22d568 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -266,7 +266,7 @@ class Distro(distros.Distro): self.lock_passwd(name) # Configure sudo access - if 'sudo' in kwargs: + if 'sudo' in kwargs and kwargs['sudo'] is not False: self.write_sudo_rules(name, kwargs['sudo']) # Import SSH keys diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 7bca24a3..01ecad7b 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -30,6 +30,11 @@ users: gecos: Magic Cloud App Daemon User inactive: true system: true + - name: fizzbuzz + sudo: False + ssh_authorized_keys: + - + - - snapuser: joe@joeuser.io # Valid Values: @@ -71,13 +76,21 @@ users: # no_log_init: When set to true, do not initialize lastlog and faillog database. # ssh_import_id: Optional. Import SSH ids # ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file -# sudo: Defaults to none. Set to the sudo string you want to use, i.e. -# ALL=(ALL) NOPASSWD:ALL. To add multiple rules, use the following -# format. -# sudo: -# - ALL=(ALL) NOPASSWD:/bin/mysql -# - ALL=(ALL) ALL -# Note: Please double check your syntax and make sure it is valid. +# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule +# strings or False to explicitly deny sudo usage. Examples: +# +# Allow a user unrestricted sudo access. +# sudo: ALL=(ALL) NOPASSWD:ALL +# +# Adding multiple sudo rule strings. +# sudo: +# - ALL=(ALL) NOPASSWD:/bin/mysql +# - ALL=(ALL) ALL +# +# Prevent sudo access for a user. +# sudo: False +# +# Note: Please double check your syntax and make sure it is valid. # cloud-init does not parse/check the syntax of the sudo # directive. # system: Create the user as a system user. This means no home directory. diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index 5670904a..07176caa 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -145,4 +145,12 @@ class TestCreateUser(TestCase): mock.call(['passwd', '-l', user])] self.assertEqual(m_subp.call_args_list, expected) + def test_explicit_sudo_false(self, m_subp, m_is_snappy): + user = 'foouser' + self.dist.create_user(user, sudo=False) + self.assertEqual( + m_subp.call_args_list, + [self._useradd2call([user, '-m']), + mock.call(['passwd', '-l', user])]) + # vi: ts=4 expandtab -- cgit v1.2.3
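As the new unit test shows, the explicit opt-out is just another value of the existing keyword argument; a short usage sketch (the Distro instance 'dist' and the user names are illustrative, not part of this change):

    # 'dist' stands for an already-initialized Distro object (illustrative)
    dist.create_user('fizzbuzz', sudo=False)   # no sudoers rule is written
    dist.create_user('ops', sudo='ALL=(ALL) NOPASSWD:ALL')  # rule still applied

Passing False behaves exactly like omitting the key, but records the intent explicitly in the configuration.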