author     Scott Moser <smoser@brickies.net>  2016-08-12 17:05:04 -0400
committer  Scott Moser <smoser@brickies.net>  2016-08-12 17:05:04 -0400
commit     bf1728902bd3e81e00aa9786a5b1c67e4f30a659 (patch)
tree       857c914003fda2ec5223425a31b646cbafd9907b
parent     e28ba310872846e0bc60595aed353c17b760fdcb (diff)
parent     bc2c3267549b9067c017a34e22bbee18890aec06 (diff)
download   vyos-cloud-init-bf1728902bd3e81e00aa9786a5b1c67e4f30a659.tar.gz
           vyos-cloud-init-bf1728902bd3e81e00aa9786a5b1c67e4f30a659.zip
Merge branch 'master' into ubuntu/devel
38 files changed, 1710 insertions, 333 deletions
diff --git a/ChangeLog b/ChangeLog
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,6 @@
+0.7.8:
+ - SmartOS: more improvements for network configuration
+ - add ntp configuration module [Ryan Harper]
 0.7.7:
  - open 0.7.7
  - Digital Ocean: add datasource for Digital Ocean. [Neal Shrader]
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 70d4e7c3..0086840f 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,6 +47,8 @@ Example config:
 from cloudinit import util
 
+distros = ['ubuntu']
+
 
 def handle(name, cfg, cloud, log, args):
     # Get config
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index ada535f8..b3089f30 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -19,6 +19,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+import errno
+
 import six
 from six import BytesIO
 
@@ -38,16 +40,18 @@ LOG = logging.getLogger(__name__)
 
 def configure(config, server_cfg=SERVER_CFG,
               pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
-    # Read server.cfg values from the
-    # original file in order to be able to mix the rest up
+    # Read server.cfg (if it exists) values from the
+    # original file in order to be able to mix the rest up.
     try:
-        mcollective_config = ConfigObj(server_cfg, file_error=True)
-        existed = True
-    except IOError:
-        LOG.debug("Did not find file %s", server_cfg)
-        mcollective_config = ConfigObj()
-        existed = False
-
+        old_contents = util.load_file(server_cfg, quiet=False, decode=False)
+        mcollective_config = ConfigObj(BytesIO(old_contents))
+    except IOError as e:
+        if e.errno != errno.ENOENT:
+            raise
+        else:
+            LOG.debug("Did not find file %s (starting with an empty"
+                      " config)", server_cfg)
+            mcollective_config = ConfigObj()
     for (cfg_name, cfg) in config.items():
         if cfg_name == 'public-cert':
             util.write_file(pubcert_file, cfg, mode=0o644)
@@ -74,12 +78,18 @@ def configure(config, server_cfg=SERVER_CFG,
             # Otherwise just try to convert it to a string
             mcollective_config[cfg_name] = str(cfg)
 
-    if existed:
-        # We got all our config as wanted we'll rename
-        # the previous server.cfg and create our new one
-        util.rename(server_cfg, "%s.old" % (server_cfg))
+    try:
+        # We got all our config as wanted we'll copy
+        # the previous server.cfg and overwrite the old with our new one
+        util.copy(server_cfg, "%s.old" % (server_cfg))
+    except IOError as e:
+        if e.errno == errno.ENOENT:
+            # Doesn't exist to copy...
+            pass
+        else:
+            raise
 
-    # Now we got the whole file, write to disk...
+    # Now we got the whole (new) file, write to disk...
     contents = BytesIO()
     mcollective_config.write(contents)
     util.write_file(server_cfg, contents.getvalue(), mode=0o644)
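Worth noting in the cc_mcollective change: the boolean `existed` flag is replaced by the standard "load it if present, otherwise start empty" idiom, re-raising anything other than ENOENT. A minimal standalone sketch of that idiom (plain open() stands in for cloud-init's util.load_file, which is the only substitution; the rest mirrors the diff):

    import errno
    from io import BytesIO
    from configobj import ConfigObj

    def load_or_empty(path):
        # Seed the config from an existing file; a missing file means
        # "start from scratch", any other IOError propagates.
        try:
            with open(path, 'rb') as f:
                return ConfigObj(BytesIO(f.read()))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return ConfigObj()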
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
new file mode 100644
index 00000000..ad69aa34
--- /dev/null
+++ b/cloudinit/config/cc_ntp.py
@@ -0,0 +1,106 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import templater
+from cloudinit import type_utils
+from cloudinit import util
+
+import os
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+NTP_CONF = '/etc/ntp.conf'
+NR_POOL_SERVERS = 4
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
+
+
+def handle(name, cfg, cloud, log, _args):
+    """
+    Enable and configure ntp
+
+    ntp:
+       pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org']
+       servers: ['192.168.2.1']
+
+    """
+
+    ntp_cfg = cfg.get('ntp', {})
+
+    if not isinstance(ntp_cfg, (dict)):
+        raise RuntimeError(("'ntp' key existed in config,"
+                            " but not a dictionary type,"
+                            " is a %s %instead"), type_utils.obj_name(ntp_cfg))
+
+    if 'ntp' not in cfg:
+        LOG.debug("Skipping module named %s,"
+                  "not present or disabled by cfg", name)
+        return True
+
+    install_ntp(cloud.distro.install_packages, packages=['ntp'],
+                check_exe="ntpd")
+    rename_ntp_conf()
+    write_ntp_config_template(ntp_cfg, cloud)
+
+
+def install_ntp(install_func, packages=None, check_exe="ntpd"):
+    if util.which(check_exe):
+        return
+    if packages is None:
+        packages = ['ntp']
+
+    install_func(packages)
+
+
+def rename_ntp_conf(config=NTP_CONF):
+    if os.path.exists(config):
+        util.rename(config, config + ".dist")
+
+
+def generate_server_names(distro):
+    names = []
+    for x in range(0, NR_POOL_SERVERS):
+        name = "%d.%s.pool.ntp.org" % (x, distro)
+        names.append(name)
+    return names
+
+
+def write_ntp_config_template(cfg, cloud):
+    servers = cfg.get('servers', [])
+    pools = cfg.get('pools', [])
+
+    if len(servers) == 0 and len(pools) == 0:
+        LOG.debug('Adding distro default ntp pool servers')
+        pools = generate_server_names(cloud.distro.name)
+
+    params = {
+        'servers': servers,
+        'pools': pools,
+    }
+
+    template_fn = cloud.get_template_filename('ntp.conf.%s' %
+                                              (cloud.distro.name))
+    if not template_fn:
+        template_fn = cloud.get_template_filename('ntp.conf')
+    if not template_fn:
+        raise RuntimeError(("No template found, "
+                            "not rendering %s"), NTP_CONF)
+
+    templater.render_to_file(template_fn, NTP_CONF, params)
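The new cc_ntp module falls back to NR_POOL_SERVERS distro-prefixed pool names when the user supplies neither servers nor pools. A quick illustration of the parameters that end up in the template render (a standalone re-statement of generate_server_names, not an import of the module):

    # what generate_server_names('ubuntu') yields with NR_POOL_SERVERS = 4
    names = ["%d.%s.pool.ntp.org" % (i, "ubuntu") for i in range(4)]
    params = {'servers': [], 'pools': names}
    # ['0.ubuntu.pool.ntp.org', '1.ubuntu.pool.ntp.org',
    #  '2.ubuntu.pool.ntp.org', '3.ubuntu.pool.ntp.org']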
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 3a113aea..d4ad724a 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -18,6 +18,8 @@
 
 from cloudinit import util
 
+distros = ['fedora', 'rhel']
+
 
 def handle(name, cfg, _cloud, log, _args):
     sm = SubscriptionManager(cfg)
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 1a485ee6..6bcd8382 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -68,6 +68,8 @@ BUILTIN_CFG = {
     'config': {},
 }
 
+distros = ['ubuntu']
+
 
 def parse_filename(fname):
     fname = os.path.basename(fname)
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
index 884d79f1..bffb4380 100644
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ b/cloudinit/config/cc_ubuntu_init_switch.py
@@ -86,6 +86,8 @@ else
 fi
 """
 
+distros = ['ubuntu']
+
 
 def handle(name, cfg, cloud, log, args):
     """Handler method activated by cloud-init."""
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 64fba869..22549e62 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -23,6 +23,8 @@ import six
 
 from cloudinit import util
 
+distros = ['fedora', 'rhel']
+
 
 def _canonicalize_id(repo_id):
     repo_id = repo_id.lower().replace("-", "_")
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index a3529609..48136f7c 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -110,12 +110,6 @@ class DataSourceAltCloud(sources.DataSource):
 
         '''
-        uname_arch = os.uname()[4]
-        if uname_arch.startswith("arm") or uname_arch == "aarch64":
-            # Disabling because dmi data is not available on ARM processors
-            LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
-            return 'UNKNOWN'
-
         system_name = util.read_dmi_data("system-product-name")
         if not system_name:
             return 'UNKNOWN'
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index d1f806d6..be74503b 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -16,7 +16,6 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 from base64 import b64decode
-import os
 import re
 
 from cloudinit.cs_utils import Cepko
@@ -45,11 +44,6 @@ class DataSourceCloudSigma(sources.DataSource):
         Uses dmi data to detect if this instance of cloud-init is running
         in the CloudSigma's infrastructure.
         """
-        uname_arch = os.uname()[4]
-        if uname_arch.startswith("arm") or uname_arch == "aarch64":
-            # Disabling because dmi data on ARM processors
-            LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
-            return False
 
         LOG.debug("determining hypervisor product name via dmi data")
         sys_product_name = util.read_dmi_data("system-product-name")
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 91d6ff13..5c9edabe 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -134,7 +134,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
             vd = results.get('vendordata')
             self.vendordata_pure = vd
             try:
-                self.vendordata_raw = openstack.convert_vendordata_json(vd)
+                self.vendordata_raw = sources.convert_vendordata(vd)
             except ValueError as e:
                 LOG.warn("Invalid content in vendor-data: %s", e)
                 self.vendordata_raw = None
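The recurring one-line change above gives each config module a module-level `distros` list, the attribute cloud-init's module runner consults to decide whether a module applies to the running distro. Reduced to a skeleton (the handle() signature is the real cc_* contract; the body is a placeholder):

    # minimal cc_* module shape: the runner checks `distros` and
    # warns/skips when the running distro is not listed
    distros = ['fedora', 'rhel']

    def handle(name, cfg, cloud, log, args):
        log.debug("module %s running", name)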
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 44a17a00..fc596e17 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -1,6 +1,7 @@
 # vi: ts=4 expandtab
 #
 # Author: Neal Shrader <neal@digitalocean.com>
+# Author: Ben Howard <bh@digitalocean.com>
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 3, as
@@ -14,22 +15,27 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from cloudinit import ec2_utils
+# DigitalOcean Droplet API:
+# https://developers.digitalocean.com/documentation/metadata/
+
+import json
+
 from cloudinit import log as logging
 from cloudinit import sources
+from cloudinit import url_helper
 from cloudinit import util
 
-import functools
-
-
 LOG = logging.getLogger(__name__)
 
 BUILTIN_DS_CONFIG = {
-    'metadata_url': 'http://169.254.169.254/metadata/v1/',
-    'mirrors_url': 'http://mirrors.digitalocean.com/'
+    'metadata_url': 'http://169.254.169.254/metadata/v1.json',
 }
-MD_RETRIES = 0
-MD_TIMEOUT = 1
+
+# Wait for a up to a minute, retrying the meta-data server
+# every 2 seconds.
+MD_RETRIES = 30
+MD_TIMEOUT = 2
+MD_WAIT_RETRY = 2
 
 
 class DataSourceDigitalOcean(sources.DataSource):
@@ -40,43 +46,61 @@ class DataSourceDigitalOcean(sources.DataSource):
             util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
             BUILTIN_DS_CONFIG])
         self.metadata_address = self.ds_cfg['metadata_url']
+        self.retries = self.ds_cfg.get('retries', MD_RETRIES)
+        self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
+        self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
 
-        if self.ds_cfg.get('retries'):
-            self.retries = self.ds_cfg['retries']
-        else:
-            self.retries = MD_RETRIES
+    def _get_sysinfo(self):
+        # DigitalOcean embeds vendor ID and instance/droplet_id in the
+        # SMBIOS information
 
-        if self.ds_cfg.get('timeout'):
-            self.timeout = self.ds_cfg['timeout']
-        else:
-            self.timeout = MD_TIMEOUT
+        LOG.debug("checking if instance is a DigitalOcean droplet")
+
+        # Detect if we are on DigitalOcean and return the Droplet's ID
+        vendor_name = util.read_dmi_data("system-manufacturer")
+        if vendor_name != "DigitalOcean":
+            return (False, None)
 
-    def get_data(self):
-        caller = functools.partial(util.read_file_or_url,
-                                   timeout=self.timeout, retries=self.retries)
+        LOG.info("running on DigitalOcean")
 
-        def mcaller(url):
-            return caller(url).contents
+        droplet_id = util.read_dmi_data("system-serial-number")
+        if droplet_id:
+            LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet"
+                       "{}").format(droplet_id))
+        else:
+            LOG.critical(("system identified via SMBIOS as a DigitalOcean "
+                          "Droplet, but did not provide an ID. Please file a "
+                          "support ticket at: "
+                          "https://cloud.digitalocean.com/support/tickets/"
+                          "new"))
 
-        md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
-                                            base_url=self.metadata_address,
-                                            caller=mcaller)
+        return (True, droplet_id)
 
-        self.metadata = md.materialize()
+    def get_data(self, apply_filter=False):
+        (is_do, droplet_id) = self._get_sysinfo()
 
-        if self.metadata.get('id'):
-            return True
-        else:
+        # only proceed if we know we are on DigitalOcean
+        if not is_do:
             return False
 
-    def get_userdata_raw(self):
-        return "\n".join(self.metadata['user-data'])
+        LOG.debug("reading metadata from {}".format(self.metadata_address))
+        response = url_helper.readurl(self.metadata_address,
+                                      timeout=self.timeout,
+                                      sec_between=self.wait_retry,
+                                      retries=self.retries)
 
-    def get_vendordata_raw(self):
-        return "\n".join(self.metadata['vendor-data'])
+        contents = util.decode_binary(response.contents)
+        decoded = json.loads(contents)
+
+        self.metadata = decoded
+        self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id)
+        self.metadata['local-hostname'] = decoded.get('hostname', droplet_id)
+        self.vendordata_raw = decoded.get("vendor_data", None)
+        self.userdata_raw = decoded.get("user_data", None)
+        return True
 
     def get_public_ssh_keys(self):
-        public_keys = self.metadata['public-keys']
+        public_keys = self.metadata.get('public_keys', [])
         if isinstance(public_keys, list):
             return public_keys
         else:
@@ -84,21 +108,17 @@ class DataSourceDigitalOcean(sources.DataSource):
 
     @property
     def availability_zone(self):
-        return self.metadata['region']
-
-    def get_instance_id(self):
-        return self.metadata['id']
-
-    def get_hostname(self, fqdn=False, resolve_ip=False):
-        return self.metadata['hostname']
-
-    def get_package_mirror_info(self):
-        return self.ds_cfg['mirrors_url']
+        return self.metadata.get('region', 'default')
 
     @property
     def launch_index(self):
         return None
 
+    def check_instance_id(self, sys_cfg):
+        return sources.instance_id_matches_system_uuid(
+            self.get_instance_id(), 'system-serial-number')
+
+
 # Used to match classes to dependencies
 datasources = [
     (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
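The rewritten DigitalOcean datasource replaces the crawlable EC2-style tree with one JSON document fetched in a single request. A minimal sketch of that flow using only the standard library (the endpoint URL and key names come from the diff above; the 2-second timeout mirrors MD_TIMEOUT):

    import json
    from urllib.request import urlopen

    url = "http://169.254.169.254/metadata/v1.json"
    doc = json.load(urlopen(url, timeout=2))
    instance_id = doc.get("droplet_id")
    hostname = doc.get("hostname")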
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index d828f078..ab93c0a2 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -20,7 +20,6 @@
 
 from __future__ import print_function
 
-import errno
 import os
 import time
 
@@ -32,7 +31,14 @@ from cloudinit import util
 
 LOG = logging.getLogger(__name__)
 MD_VERSION = "2012-03-01"
 
-BINARY_FIELDS = ('user-data',)
+DS_FIELDS = [
+    # remote path, location in dictionary, binary data?, optional?
+ ("meta-data/instance-id", 'meta-data/instance-id', False, False), + ("meta-data/local-hostname", 'meta-data/local-hostname', False, False), + ("meta-data/public-keys", 'meta-data/public-keys', False, True), + ('meta-data/vendor-data', 'vendor-data', True, True), + ('user-data', 'user-data', True, True), +] class DataSourceMAAS(sources.DataSource): @@ -43,6 +49,7 @@ class DataSourceMAAS(sources.DataSource): instance-id user-data hostname + vendor-data """ def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -71,10 +78,7 @@ class DataSourceMAAS(sources.DataSource): mcfg = self.ds_cfg try: - (userdata, metadata) = read_maas_seed_dir(self.seed_dir) - self.userdata_raw = userdata - self.metadata = metadata - self.base_url = self.seed_dir + self._set_data(self.seed_dir, read_maas_seed_dir(self.seed_dir)) return True except MAASSeedDirNone: pass @@ -95,18 +99,29 @@ class DataSourceMAAS(sources.DataSource): if not self.wait_for_metadata_service(url): return False - self.base_url = url - - (userdata, metadata) = read_maas_seed_url( - self.base_url, read_file_or_url=self.oauth_helper.readurl, - paths=self.paths, retries=1) - self.userdata_raw = userdata - self.metadata = metadata + self._set_data( + url, read_maas_seed_url( + url, read_file_or_url=self.oauth_helper.readurl, + paths=self.paths, retries=1)) return True except Exception: util.logexc(LOG, "Failed fetching metadata from url %s", url) return False + def _set_data(self, url, data): + # takes a url for base_url and a tuple of userdata, metadata, vd. + self.base_url = url + ud, md, vd = data + self.userdata_raw = ud + self.metadata = md + self.vendordata_pure = vd + if vd: + try: + self.vendordata_raw = sources.convert_vendordata(vd) + except ValueError as e: + LOG.warn("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + def wait_for_metadata_service(self, url): mcfg = self.ds_cfg max_wait = 120 @@ -126,6 +141,8 @@ class DataSourceMAAS(sources.DataSource): LOG.warn("Failed to get timeout, using %s" % timeout) starttime = time.time() + if url.endswith("/"): + url = url[:-1] check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] url = self.oauth_helper.wait_for_url( @@ -141,27 +158,13 @@ class DataSourceMAAS(sources.DataSource): def read_maas_seed_dir(seed_d): - """ - Return user-data and metadata for a maas seed dir in seed_d. - Expected format of seed_d are the following files: - * instance-id - * local-hostname - * user-data - """ - if not os.path.isdir(seed_d): + if seed_d.startswith("file://"): + seed_d = seed_d[7:] + if not os.path.isdir(seed_d) or len(os.listdir(seed_d)) == 0: raise MAASSeedDirNone("%s: not a directory") - files = ('local-hostname', 'instance-id', 'user-data', 'public-keys') - md = {} - for fname in files: - try: - md[fname] = util.load_file(os.path.join(seed_d, fname), - decode=fname not in BINARY_FIELDS) - except IOError as e: - if e.errno != errno.ENOENT: - raise - - return check_seed_contents(md, seed_d) + # seed_dir looks in seed_dir, not seed_dir/VERSION + return read_maas_seed_url("file://%s" % seed_d, version=None) def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, @@ -175,73 +178,78 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, * <seed_url>/<version>/meta-data/instance-id * <seed_url>/<version>/meta-data/local-hostname * <seed_url>/<version>/user-data + If version is None, then <version>/ will not be used. 
""" - base_url = "%s/%s" % (seed_url, version) - file_order = [ - 'local-hostname', - 'instance-id', - 'public-keys', - 'user-data', - ] - files = { - 'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'), - 'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'), - 'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'), - 'user-data': "%s/%s" % (base_url, 'user-data'), - } - if read_file_or_url is None: read_file_or_url = util.read_file_or_url + if seed_url.endswith("/"): + seed_url = seed_url[:-1] + md = {} - for name in file_order: - url = files.get(name) - if name == 'user-data': - item_retries = 0 + for path, dictname, binary, optional in DS_FIELDS: + if version is None: + url = "%s/%s" % (seed_url, path) else: - item_retries = retries - + url = "%s/%s/%s" % (seed_url, version, path) try: ssl_details = util.fetch_ssl_details(paths) - resp = read_file_or_url(url, retries=item_retries, - timeout=timeout, ssl_details=ssl_details) + resp = read_file_or_url(url, retries=retries, timeout=timeout, + ssl_details=ssl_details) if resp.ok(): - if name in BINARY_FIELDS: - md[name] = resp.contents + if binary: + md[path] = resp.contents else: - md[name] = util.decode_binary(resp.contents) + md[path] = util.decode_binary(resp.contents) else: LOG.warn(("Fetching from %s resulted in" " an invalid http code %s"), url, resp.code) except url_helper.UrlError as e: - if e.code != 404: - raise + if e.code == 404 and not optional: + raise MAASSeedDirMalformed( + "Missing required %s: %s" % (path, e)) + elif e.code != 404: + raise e + return check_seed_contents(md, seed_url) def check_seed_contents(content, seed): - """Validate if content is Is the content a dict that is valid as a - return for a datasource. - Either return a (userdata, metadata) tuple or + """Validate if dictionary content valid as a return for a datasource. 
+       Either return a (userdata, metadata, vendordata) tuple or
        Raise MAASSeedDirMalformed or MAASSeedDirNone
     """
-    md_required = ('instance-id', 'local-hostname')
-    if len(content) == 0:
+    ret = {}
+    missing = []
+    for spath, dpath, _binary, optional in DS_FIELDS:
+        if spath not in content:
+            if not optional:
+                missing.append(spath)
+            continue
+
+        if "/" in dpath:
+            top, _, p = dpath.partition("/")
+            if top not in ret:
+                ret[top] = {}
+            ret[top][p] = content[spath]
+        else:
+            ret[dpath] = content[spath]
+
+    if len(ret) == 0:
         raise MAASSeedDirNone("%s: no data files found" % seed)
 
-    found = list(content.keys())
-    missing = [k for k in md_required if k not in found]
-    if len(missing):
+    if missing:
         raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
 
-    userdata = content.get('user-data', b"")
-    md = {}
-    for (key, val) in content.items():
-        if key == 'user-data':
-            continue
-        md[key] = val
+    vd_data = None
+    if ret.get('vendor-data'):
+        err = object()
+        vd_data = util.load_yaml(ret.get('vendor-data'), default=err,
+                                 allowed=(object))
+        if vd_data is err:
+            raise MAASSeedDirMalformed("vendor-data was not loadable as yaml.")
 
-    return (userdata, md)
+    return ret.get('user-data'), ret.get('meta-data'), vd_data
 
 
 class MAASSeedDirNone(Exception):
@@ -272,6 +280,7 @@ if __name__ == "__main__":
     """
     import argparse
     import pprint
+    import sys
 
     parser = argparse.ArgumentParser(description='Interact with MAAS DS')
     parser.add_argument("--config", metavar="file",
@@ -289,17 +298,25 @@ if __name__ == "__main__":
                         default=MD_VERSION)
 
     subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
-    subcmds.add_parser('crawl', help="crawl the datasource")
-    subcmds.add_parser('get', help="do a single GET of provided url")
-    subcmds.add_parser('check-seed', help="read andn verify seed at url")
-
-    parser.add_argument("url", help="the data source to query")
+    for (name, help) in (('crawl', 'crawl the datasource'),
+                         ('get', 'do a single GET of provided url'),
+                         ('check-seed', 'read and verify seed at url')):
+        p = subcmds.add_parser(name, help=help)
+        p.add_argument("url", help="the datasource url", nargs='?',
+                       default=None)
 
     args = parser.parse_args()
 
     creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
              'token_secret': args.tsec, 'consumer_secret': args.csec}
 
+    maaspkg_cfg = "/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg"
+    if (args.config is None and args.url is None and
+            os.path.exists(maaspkg_cfg) and
+            os.access(maaspkg_cfg, os.R_OK)):
+        sys.stderr.write("Used config in %s.\n" % maaspkg_cfg)
+        args.config = maaspkg_cfg
+
     if args.config:
         cfg = util.read_conf(args.config)
         if 'datasource' in cfg:
@@ -307,6 +324,12 @@ if __name__ == "__main__":
         for key in creds.keys():
             if key in cfg and creds[key] is None:
                 creds[key] = cfg[key]
+        if args.url is None and 'metadata_url' in cfg:
+            args.url = cfg['metadata_url']
+
+    if args.url is None:
+        sys.stderr.write("Must provide a url or a config with url.\n")
+        sys.exit(1)
 
     oauth_helper = url_helper.OauthUrlHelper(**creds)
 
@@ -331,16 +354,20 @@ if __name__ == "__main__":
             printurl(url)
 
     if args.subcmd == "check-seed":
+        sys.stderr.write("Checking seed at %s\n" % args.url)
         readurl = oauth_helper.readurl
         if args.url[0] == "/" or args.url.startswith("file://"):
-            readurl = None
-        (userdata, metadata) = read_maas_seed_url(
-            args.url, version=args.apiver, read_file_or_url=readurl,
-            retries=2)
-        print("=== userdata ===")
-        print(userdata.decode())
-        print("=== metadata ===")
+            (userdata, metadata, vd) = read_maas_seed_dir(args.url)
+        else:
+            (userdata, metadata, vd) = read_maas_seed_url(
+                args.url, version=args.apiver, read_file_or_url=readurl,
+                retries=2)
+        print("=== user-data ===")
+        print("N/A" if userdata is None else userdata.decode())
+        print("=== meta-data ===")
         pprint.pprint(metadata)
+        print("=== vendor-data ===")
+        pprint.pprint("N/A" if vd is None else vd)
 
     elif args.subcmd == "get":
         printurl(args.url)
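Both read_maas_seed_url and check_seed_contents are now driven by the DS_FIELDS table, so adding a field means adding one row. How a row turns into a fetch URL, with illustrative seed/version values:

    DS_FIELDS = [
        ("meta-data/instance-id", "meta-data/instance-id", False, False),
        ("user-data", "user-data", True, True),
    ]
    seed, version = "http://example.com/xmeta", "2012-03-01"
    urls = ["%s/%s/%s" % (seed, version, path)
            for path, _, _, _ in DS_FIELDS]
    # ['http://example.com/xmeta/2012-03-01/meta-data/instance-id',
    #  'http://example.com/xmeta/2012-03-01/user-data']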
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index cdc9eef5..e6a0b5fe 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -52,7 +52,7 @@ class DataSourceNoCloud(sources.DataSource):
         found = []
         mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
-                  'network-config': {}}
+                  'network-config': None}
 
         try:
             # Parse the kernel command line, getting data passed in
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index c06d17f3..82558214 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -138,7 +138,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
             vd = results.get('vendordata')
             self.vendordata_pure = vd
             try:
-                self.vendordata_raw = openstack.convert_vendordata_json(vd)
+                self.vendordata_raw = sources.convert_vendordata(vd)
             except ValueError as e:
                 LOG.warn("Invalid content in vendor-data: %s", e)
                 self.vendordata_raw = None
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index ccc86883..143ab368 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -60,11 +60,15 @@ SMARTOS_ATTRIB_MAP = {
     'availability_zone': ('sdc:datacenter_name', True),
     'vendor-data': ('sdc:vendor-data', False),
     'operator-script': ('sdc:operator-script', False),
+    'hostname': ('sdc:hostname', True),
+    'dns_domain': ('sdc:dns_domain', True),
 }
 
 SMARTOS_ATTRIB_JSON = {
     # Cloud-init Key : (SmartOS Key known JSON)
     'network-data': 'sdc:nics',
+    'dns_servers': 'sdc:resolvers',
+    'routes': 'sdc:routes',
 }
 
 SMARTOS_ENV_LX_BRAND = "lx-brand"
@@ -311,7 +315,10 @@ class DataSourceSmartOS(sources.DataSource):
         if self._network_config is None:
             if self.network_data is not None:
                 self._network_config = (
-                    convert_smartos_network_data(self.network_data))
+                    convert_smartos_network_data(
+                        network_data=self.network_data,
+                        dns_servers=self.metadata['dns_servers'],
+                        dns_domain=self.metadata['dns_domain']))
         return self._network_config
 
 
@@ -445,7 +452,8 @@ class JoyentMetadataClient(object):
 
 
 class JoyentMetadataSocketClient(JoyentMetadataClient):
-    def __init__(self, socketpath):
+    def __init__(self, socketpath, smartos_type=SMARTOS_ENV_LX_BRAND):
+        super(JoyentMetadataSocketClient, self).__init__(smartos_type)
         self.socketpath = socketpath
 
     def open_transport(self):
@@ -461,7 +469,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
 
 
 class JoyentMetadataSerialClient(JoyentMetadataClient):
-    def __init__(self, device, timeout=10, smartos_type=None):
+    def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM):
         super(JoyentMetadataSerialClient, self).__init__(smartos_type)
         self.device = device
         self.timeout = timeout
@@ -583,7 +591,8 @@ def jmc_client_factory(
             device=serial_device, timeout=serial_timeout,
             smartos_type=smartos_type)
     elif smartos_type == SMARTOS_ENV_LX_BRAND:
-        return JoyentMetadataSocketClient(socketpath=metadata_sockfile)
+        return JoyentMetadataSocketClient(socketpath=metadata_sockfile,
+                                          smartos_type=smartos_type)
 
     raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
smartos_type: %s" % smartos_type) @@ -644,14 +653,8 @@ def write_boot_content(content, content_f, link=None, shebang=False, util.logexc(LOG, "failed establishing content link: %s", e) -def get_smartos_environ(uname_version=None, product_name=None, - uname_arch=None): +def get_smartos_environ(uname_version=None, product_name=None): uname = os.uname() - if uname_arch is None: - uname_arch = uname[4] - - if uname_arch.startswith("arm") or uname_arch == "aarch64": - return None # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but # report 'BrandZ virtual linux' as the kernel version @@ -671,8 +674,9 @@ def get_smartos_environ(uname_version=None, product_name=None, return None -# Covert SMARTOS 'sdc:nics' data to network_config yaml -def convert_smartos_network_data(network_data=None): +# Convert SMARTOS 'sdc:nics' data to network_config yaml +def convert_smartos_network_data(network_data=None, + dns_servers=None, dns_domain=None): """Return a dictionary of network_config by parsing provided SMARTOS sdc:nics configuration data @@ -706,9 +710,7 @@ def convert_smartos_network_data(network_data=None): 'broadcast', 'dns_nameservers', 'dns_search', - 'gateway', 'metric', - 'netmask', 'pointopoint', 'routes', 'scope', @@ -716,6 +718,29 @@ def convert_smartos_network_data(network_data=None): ], } + if dns_servers: + if not isinstance(dns_servers, (list, tuple)): + dns_servers = [dns_servers] + else: + dns_servers = [] + + if dns_domain: + if not isinstance(dns_domain, (list, tuple)): + dns_domain = [dns_domain] + else: + dns_domain = [] + + def is_valid_ipv4(addr): + return '.' in addr + + def is_valid_ipv6(addr): + return ':' in addr + + pgws = { + 'ipv4': {'match': is_valid_ipv4, 'gw': None}, + 'ipv6': {'match': is_valid_ipv6, 'gw': None}, + } + config = [] for nic in network_data: cfg = dict((k, v) for k, v in nic.items() @@ -727,18 +752,40 @@ def convert_smartos_network_data(network_data=None): cfg.update({'mac_address': nic['mac']}) subnets = [] - for ip, gw in zip(nic['ips'], nic['gateways']): - subnet = dict((k, v) for k, v in nic.items() - if k in valid_keys['subnet']) - subnet.update({ - 'type': 'static', - 'address': ip, - 'gateway': gw, - }) + for ip in nic.get('ips', []): + if ip == "dhcp": + subnet = {'type': 'dhcp4'} + else: + subnet = dict((k, v) for k, v in nic.items() + if k in valid_keys['subnet']) + subnet.update({ + 'type': 'static', + 'address': ip, + }) + + proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6' + # Only use gateways for 'primary' nics + if 'primary' in nic and nic.get('primary', False): + # the ips and gateways list may be N to M, here + # we map the ip index into the gateways list, + # and handle the case that we could have more ips + # than gateways. 
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 87b8e524..d1395270 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -21,8 +21,8 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 import abc
+import copy
 import os
-
 import six
 
 from cloudinit import importer
@@ -355,6 +355,31 @@ def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
     return instance_id.lower() == dmi_value.lower()
 
 
+def convert_vendordata(data, recurse=True):
+    """data: a loaded object (strings, arrays, dicts).
+       return something suitable for cloudinit vendordata_raw.
+
+       if data is:
+          None: return None
+          string: return string
+          list: return data
+             the list is then processed in UserDataProcessor
+          dict: return convert_vendordata(data.get('cloud-init'))
+    """
+    if not data:
+        return None
+    if isinstance(data, six.string_types):
+        return data
+    if isinstance(data, list):
+        return copy.deepcopy(data)
+    if isinstance(data, dict):
+        if recurse is True:
+            return convert_vendordata(data.get('cloud-init'),
+                                      recurse=False)
+        raise ValueError("vendordata['cloud-init'] cannot be dict")
+    raise ValueError("Unknown data type for vendordata: %s" % type(data))
+
+
 # 'depends' is a list of dependencies (DEP_FILESYSTEM)
 # ds_list is a list of 2 item lists
 # ds_list = [
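convert_vendordata is the old openstack helper promoted to cloudinit/sources so the MAAS, ConfigDrive and OpenStack datasources share one implementation (the openstack copy is deleted in the next file). Its per-type behaviour, as a usage sketch against the post-merge import path:

    from cloudinit.sources import convert_vendordata

    convert_vendordata(None)                    # -> None
    convert_vendordata("#cloud-config\n")       # -> the string, unchanged
    convert_vendordata(["echo", "hi"])          # -> a deep copy of the list
    convert_vendordata({"cloud-init": "#cloud-config\n"})  # -> inner string
    # a dict nested under 'cloud-init' raises ValueError (recurse=False)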
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 2e7a1d47..84322e0e 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -571,7 +571,7 @@ def convert_net_json(network_json=None, known_macs=None):
                 subnet['ipv6'] = True
             subnets.append(subnet)
         cfg.update({'subnets': subnets})
-        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
+        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge', 'tap']:
             cfg.update({
                 'type': 'physical',
                 'mac_address': link['ethernet_mac_address']})
@@ -621,28 +621,3 @@ def convert_net_json(network_json=None, known_macs=None):
         config.append(cfg)
 
     return {'version': 1, 'config': config}
-
-
-def convert_vendordata_json(data, recurse=True):
-    """data: a loaded json *object* (strings, arrays, dicts).
-    return something suitable for cloudinit vendordata_raw.
-
-    if data is:
-       None: return None
-       string: return string
-       list: return data
-          the list is then processed in UserDataProcessor
-       dict: return convert_vendordata_json(data.get('cloud-init'))
-    """
-    if not data:
-        return None
-    if isinstance(data, six.string_types):
-        return data
-    if isinstance(data, list):
-        return copy.deepcopy(data)
-    if isinstance(data, dict):
-        if recurse is True:
-            return convert_vendordata_json(data.get('cloud-init'),
-                                           recurse=False)
-        raise ValueError("vendordata['cloud-init'] cannot be dict")
-    raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/util.py b/cloudinit/util.py
index e5dd61a0..226628cc 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2227,10 +2227,17 @@ def read_dmi_data(key):
 
     If all of the above fail to find a value, None will be returned.
     """
+
     syspath_value = _read_dmi_syspath(key)
     if syspath_value is not None:
         return syspath_value
 
+    # running dmidecode can be problematic on some arches (LP: #1243287)
+    uname_arch = os.uname()[4]
+    if uname_arch.startswith("arm") or uname_arch == "aarch64":
+        LOG.debug("dmidata is not supported on %s", uname_arch)
+        return None
+
     dmidecode_path = which('dmidecode')
     if dmidecode_path:
         return _call_dmidecode(key, dmidecode_path)
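This guard in read_dmi_data centralizes the per-datasource ARM checks removed earlier in this merge (AltCloud, CloudSigma, SmartOS). The gate itself is only a uname test:

    import os

    # same test read_dmi_data now applies before shelling out to dmidecode
    arch = os.uname()[4]
    skip_dmidecode = arch.startswith("arm") or arch == "aarch64"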
""" + syspath_value = _read_dmi_syspath(key) if syspath_value is not None: return syspath_value + # running dmidecode can be problematic on some arches (LP: #1243287) + uname_arch = os.uname()[4] + if uname_arch.startswith("arm") or uname_arch == "aarch64": + LOG.debug("dmidata is not supported on %s", uname_arch) + return None + dmidecode_path = which('dmidecode') if dmidecode_path: return _call_dmidecode(key, dmidecode_path) diff --git a/config/cloud.cfg b/config/cloud.cfg index a6afcc83..2d7fb473 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -45,6 +45,7 @@ cloud_config_modules: - emit_upstart - disk_setup - mounts + - ntp - ssh-import-id - locale - set-passwords diff --git a/doc/examples/cloud-config-ntp.txt b/doc/examples/cloud-config-ntp.txt new file mode 100644 index 00000000..2fc656e4 --- /dev/null +++ b/doc/examples/cloud-config-ntp.txt @@ -0,0 +1,27 @@ +#cloud-config + +# ntp: configure ntp services +# servers: List of NTP servers with which to sync +# pools: List of NTP pool servers with which to sync (pools are typically +# DNS hostnames which resolve to different specific servers to load +# balance a set of services) +# +# Each server in the list will be added in list-order in the following format: +# +# [pool|server] <server entry> iburst +# +# +# If no servers or pools are defined but ntp is enabled, then cloud-init will +# render the distro default list of pools +# +# pools = [ +# '0.{distro}.pool.ntp.org', +# '1.{distro}.pool.ntp.org', +# '2.{distro}.pool.ntp.org', +# '3.{distro}.pool.ntp.org', +# ] +# + +ntp: + pools: ['0.company.pool.ntp.org', '1.company.pool.ntp.org', 'ntp.myorg.org'] + servers: ['my.ntp.server.local', 'ntp.ubuntu.com', '192.168.23.2'] diff --git a/requirements.txt b/requirements.txt index cc1dc05f..0c4951f5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,7 +22,7 @@ oauthlib # that the built-in config parser is not sufficent (ie # when we need to preserve comments, or do not have a top-level # section)... -configobj +configobj>=5.0.2 # All new style configurations are in the yaml format pyyaml diff --git a/templates/ntp.conf.debian.tmpl b/templates/ntp.conf.debian.tmpl new file mode 100644 index 00000000..3f07eeaa --- /dev/null +++ b/templates/ntp.conf.debian.tmpl @@ -0,0 +1,63 @@ +## template:jinja + +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +driftfile /var/lib/ntp/ntp.drift + +# Enable this if you want statistics to be logged. +#statsdir /var/log/ntpstats/ + +statistics loopstats peerstats clockstats +filegen loopstats file loopstats type day enable +filegen peerstats file peerstats type day enable +filegen clockstats file clockstats type day enable + + +# You do need to talk to an NTP server or two (or three). +#server ntp.your-provider.example + +# pool.ntp.org maps to about 1000 low-stratum NTP servers. Your server will +# pick a different set every time it starts up. Please consider joining the +# pool: <http://www.pool.ntp.org/join.html> +{% if pools -%}# pools{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for +# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions> +# might also be helpful. 
diff --git a/requirements.txt b/requirements.txt
index cc1dc05f..0c4951f5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,7 +22,7 @@ oauthlib
 # that the built-in config parser is not sufficent (ie
 # when we need to preserve comments, or do not have a top-level
 # section)...
-configobj
+configobj>=5.0.2
 
 # All new style configurations are in the yaml format
 pyyaml
diff --git a/templates/ntp.conf.debian.tmpl b/templates/ntp.conf.debian.tmpl
new file mode 100644
index 00000000..3f07eeaa
--- /dev/null
+++ b/templates/ntp.conf.debian.tmpl
@@ -0,0 +1,63 @@
+## template:jinja
+
+# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
+
+driftfile /var/lib/ntp/ntp.drift
+
+# Enable this if you want statistics to be logged.
+#statsdir /var/log/ntpstats/
+
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+
+
+# You do need to talk to an NTP server or two (or three).
+#server ntp.your-provider.example
+
+# pool.ntp.org maps to about 1000 low-stratum NTP servers.  Your server will
+# pick a different set every time it starts up.  Please consider joining the
+# pool: <http://www.pool.ntp.org/join.html>
+{% if pools -%}# pools{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
+# details.  The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default kod notrap nomodify nopeer noquery limited
+restrict -6 default kod notrap nomodify nopeer noquery limited
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Needed for adding pool entries
+restrict source notrap nomodify noquery
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+
+# If you want to provide time to your local subnet, change the next line.
+# (Again, the address is an example only.)
+#broadcast 192.168.123.255
+
+# If you want to listen to time broadcasts on your local subnet, de-comment the
+# next lines.  Please do this only if you trust everybody on the network!
+#disable auth
+#broadcastclient
diff --git a/templates/ntp.conf.fedora.tmpl b/templates/ntp.conf.fedora.tmpl
new file mode 100644
index 00000000..af7b1b09
--- /dev/null
+++ b/templates/ntp.conf.fedora.tmpl
@@ -0,0 +1,66 @@
+## template:jinja
+
+# For more information about this file, see the man pages
+# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
+
+driftfile /var/lib/ntp/drift
+
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+restrict default nomodify notrap nopeer noquery
+
+# Permit all access over the loopback interface.  This could
+# be tightened as well, but to do so would effect some of
+# the administrative functions.
+restrict 127.0.0.1
+restrict ::1
+
+# Hosts on local network are less restricted.
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
+
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+#broadcast 192.168.1.255 autokey        # broadcast server
+#broadcastclient                        # broadcast client
+#broadcast 224.0.1.1 autokey            # multicast server
+#multicastclient 224.0.1.1              # multicast client
+#manycastserver 239.255.254.254         # manycast server
+#manycastclient 239.255.254.254 autokey # manycast client
+
+# Enable public key cryptography.
+#crypto
+
+includefile /etc/ntp/crypto/pw
+
+# Key file containing the keys and key identifiers used when operating
+# with symmetric key cryptography.
+keys /etc/ntp/keys
+
+# Specify the key identifiers which are trusted.
+#trustedkey 4 8 42
+
+# Specify the key identifier to use with the ntpdc utility.
+#requestkey 8
+
+# Specify the key identifier to use with the ntpq utility.
+#controlkey 8
+
+# Enable writing of statistics records.
+#statistics clockstats cryptostats loopstats peerstats
+
+# Disable the monitoring facility to prevent amplification attacks using ntpdc
+# monlist command when default restrict does not include the noquery flag. See
+# CVE-2013-5211 for more details.
+# Note: Monitoring will not be disabled with the limited restriction flag.
+disable monitor
diff --git a/templates/ntp.conf.rhel.tmpl b/templates/ntp.conf.rhel.tmpl
new file mode 100644
index 00000000..62b47764
--- /dev/null
+++ b/templates/ntp.conf.rhel.tmpl
@@ -0,0 +1,61 @@
+## template:jinja
+
+# For more information about this file, see the man pages
+# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
+
+driftfile /var/lib/ntp/drift
+
+# Permit time synchronization with our time source, but do not
+# permit the source to query or modify the service on this system.
+restrict default kod nomodify notrap nopeer noquery
+restrict -6 default kod nomodify notrap nopeer noquery
+
+# Permit all access over the loopback interface.  This could
+# be tightened as well, but to do so would effect some of
+# the administrative functions.
+restrict 127.0.0.1
+restrict -6 ::1
+
+# Hosts on local network are less restricted.
+#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
+
+# Use public servers from the pool.ntp.org project.
+# Please consider joining the pool (http://www.pool.ntp.org/join.html).
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+#broadcast 192.168.1.255 autokey        # broadcast server
+#broadcastclient                        # broadcast client
+#broadcast 224.0.1.1 autokey            # multicast server
+#multicastclient 224.0.1.1              # multicast client
+#manycastserver 239.255.254.254         # manycast server
+#manycastclient 239.255.254.254 autokey # manycast client
+
+# Enable public key cryptography.
+#crypto
+
+includefile /etc/ntp/crypto/pw
+
+# Key file containing the keys and key identifiers used when operating
+# with symmetric key cryptography.
+keys /etc/ntp/keys
+
+# Specify the key identifiers which are trusted.
+#trustedkey 4 8 42
+
+# Specify the key identifier to use with the ntpdc utility.
+#requestkey 8
+
+# Specify the key identifier to use with the ntpq utility.
+#controlkey 8
+
+# Enable writing of statistics records.
+#statistics clockstats cryptostats loopstats peerstats
diff --git a/templates/ntp.conf.sles.tmpl b/templates/ntp.conf.sles.tmpl
new file mode 100644
index 00000000..5c5fc4db
--- /dev/null
+++ b/templates/ntp.conf.sles.tmpl
@@ -0,0 +1,100 @@
+## template:jinja
+
+################################################################################
+## /etc/ntp.conf
+##
+## Sample NTP configuration file.
+## See package 'ntp-doc' for documentation, Mini-HOWTO and FAQ.
+## Copyright (c) 1998 S.u.S.E. GmbH Fuerth, Germany.
+##
+## Author: Michael Andres,  <ma@suse.de>
+##         Michael Skibbe,  <mskibbe@suse.de>
+##
+################################################################################
+
+##
+## Radio and modem clocks by convention have addresses in the
+## form 127.127.t.u, where t is the clock type and u is a unit
+## number in the range 0-3.
+##
+## Most of these clocks require support in the form of a
+## serial port or special bus peripheral. The particular
+## device is normally specified by adding a soft link
+## /dev/device-u to the particular hardware device involved,
+## where u correspond to the unit number above.
+##
+## Generic DCF77 clock on serial port (Conrad DCF77)
+## Address:     127.127.8.u
+## Serial Port: /dev/refclock-u
+##
+## (create soft link /dev/refclock-0 to the particular ttyS?)
+##
+# server 127.127.8.0 mode 5 prefer
+
+##
+## Undisciplined Local Clock. This is a fake driver intended for backup
+## and when no outside source of synchronized time is available.
+##
+# server 127.127.1.0            # local clock (LCL)
+# fudge  127.127.1.0 stratum 10 # LCL is unsynchronized
+
+##
+## Add external Servers using
+## # rcntpd addserver <yourserver>
+## The servers will only be added to the currently running instance, not
+## to /etc/ntp.conf.
+##
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Access control configuration; see /usr/share/doc/packages/ntp/html/accopt.html for
+# details.  The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default notrap nomodify nopeer noquery
+restrict -6 default notrap nomodify nopeer noquery
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+##
+## Miscellaneous stuff
+##
+
+driftfile /var/lib/ntp/drift/ntp.drift  # path for drift file
+
+logfile   /var/log/ntp                  # alternate log file
+# logconfig =syncstatus + sysevents
+# logconfig =all
+
+# statsdir /tmp/                        # directory for statistics files
+# filegen peerstats  file peerstats  type day enable
+# filegen loopstats  file loopstats  type day enable
+# filegen clockstats file clockstats type day enable
+
+#
+# Authentication stuff
+#
+keys /etc/ntp.keys                      # path for keys file
+trustedkey 1                            # define trusted keys
+requestkey 1                            # key (7) for accessing server variables
+controlkey 1                            # key (6) for accessing server variables
+
diff --git a/templates/ntp.conf.ubuntu.tmpl b/templates/ntp.conf.ubuntu.tmpl
new file mode 100644
index 00000000..862a4fbd
--- /dev/null
+++ b/templates/ntp.conf.ubuntu.tmpl
@@ -0,0 +1,75 @@
+## template:jinja
+
+# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
+
+driftfile /var/lib/ntp/ntp.drift
+
+# Enable this if you want statistics to be logged.
+#statsdir /var/log/ntpstats/
+
+statistics loopstats peerstats clockstats
+filegen loopstats file loopstats type day enable
+filegen peerstats file peerstats type day enable
+filegen clockstats file clockstats type day enable
+
+# Specify one or more NTP servers.
+
+# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
+# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
+# more information.
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+# Use Ubuntu's ntp server as a fallback.
+# pool ntp.ubuntu.com
+
+# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
+# details.  The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
+# might also be helpful.
+#
+# Note that "restrict" applies to both servers and clients, so a configuration
+# that might be intended to block requests from certain clients could also end
+# up blocking replies from your own upstream servers.
+
+# By default, exchange time with everybody, but don't allow configuration.
+restrict -4 default kod notrap nomodify nopeer noquery limited
+restrict -6 default kod notrap nomodify nopeer noquery limited
+
+# Local users may interrogate the ntp server more closely.
+restrict 127.0.0.1
+restrict ::1
+
+# Needed for adding pool entries
+restrict source notrap nomodify noquery
+
+# Clients from this (example!) subnet have unlimited access, but only if
+# cryptographically authenticated.
+#restrict 192.168.123.0 mask 255.255.255.0 notrust
+
+
+# If you want to provide time to your local subnet, change the next line.
+# (Again, the address is an example only.)
+#broadcast 192.168.123.255
+
+# If you want to listen to time broadcasts on your local subnet, de-comment the
+# next lines.  Please do this only if you trust everybody on the network!
+#disable auth
+#broadcastclient
+
+#Changes recquired to use pps synchonisation as explained in documentation:
+#http://www.ntp.org/ntpfaq/NTP-s-config-adv.htm#AEN3918
+
+#server 127.127.8.1 mode 135 prefer    # Meinberg GPS167 with PPS
+#fudge 127.127.8.1 time1 0.0042        # relative to PPS for my hardware
+
+#server 127.127.22.1                   # ATOM(PPS)
+#fudge 127.127.22.1 flag3 1            # enable PPS API
+
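All five templates share the same pools/servers loop; the "## template:jinja" header tells cloud-init's templater to render them with jinja. To preview what the loop emits for a given config, the shared fragment can be rendered directly (the template string below is that fragment, not a full ntp.conf):

    import jinja2

    frag = ("{% for pool in pools -%}\npool {{pool}} iburst\n{% endfor %}"
            "{% for server in servers -%}\nserver {{server}} iburst\n"
            "{% endfor %}")
    print(jinja2.Template(frag).render(
        pools=['0.ubuntu.pool.ntp.org'], servers=['192.168.23.2']))
    # pool 0.ubuntu.pool.ntp.org iburst
    # server 192.168.23.2 iburst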
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 972245df..de2cf638 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -256,7 +256,9 @@ def populate_dir(path, files):
     if not os.path.exists(path):
         os.makedirs(path)
     for (name, content) in files.items():
-        with open(os.path.join(path, name), "wb") as fp:
+        p = os.path.join(path, name)
+        util.ensure_dir(os.path.dirname(p))
+        with open(p, "wb") as fp:
             if isinstance(content, six.binary_type):
                 fp.write(content)
             else:
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 18551b92..d0269943 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -101,6 +101,41 @@ NETWORK_DATA_2 = {
                  "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}]
 }
 
+# This network data ha 'tap' type for a link.
+NETWORK_DATA_3 = {
+    "services": [{"type": "dns", "address": "172.16.36.11"},
+                 {"type": "dns", "address": "172.16.36.12"}],
+    "networks": [
+        {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+         "type": "ipv4", "netmask": "255.255.255.128",
+         "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18",
+         "id": "network0",
+         "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
+                     "gateway": "172.17.48.1"}]},
+        {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e",
+         "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::",
+         "link": "tap77a0dc5b-72",
+         "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d",
+         "id": "network1",
+         "routes": [{"netmask": "::", "network": "::",
+                     "gateway": "fdb8:52d0:9d14::1"}]},
+        {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54",
+         "type": "ipv4", "netmask": "255.255.255.128",
+         "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13",
+         "id": "network2",
+         "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0",
+                     "gateway": "172.16.48.1"},
+                    {"netmask": "255.255.0.0", "network": "172.16.0.0",
+                     "gateway": "172.16.48.1"}]}],
+    "links": [
+        {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None,
+         "type": "tap", "id": "tap77a0dc5b-72",
+         "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"},
+        {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None,
+         "type": "tap", "id": "tap7d6b7bec-93",
+         "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"}
+    ]
+}
 
 KNOWN_MACS = {
     'fa:16:3e:69:b0:58': 'enp0s1',
@@ -555,6 +590,15 @@ class TestConvertNetworkData(TestCase):
             eni_rendering = f.read()
             self.assertIn("route add default gw 2.2.2.9", eni_rendering)
 
+    def test_conversion_with_tap(self):
+        ncfg = openstack.convert_net_json(NETWORK_DATA_3,
+                                          known_macs=KNOWN_MACS)
+        physicals = set()
+        for i in ncfg['config']:
+            if i.get('type') == "physical":
+                physicals.add(i['name'])
+        self.assertEqual(physicals, set(('foo1', 'foo2')))
+
 
 def cfg_ds_from_dir(seed_d):
     cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None,
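The new test exercises the 'tap' link type that convert_net_json now treats as physical (the helpers/openstack.py change earlier in this merge). Equivalent interactive usage, assuming the NETWORK_DATA_3 and KNOWN_MACS fixtures above:

    from cloudinit.sources.helpers import openstack

    ncfg = openstack.convert_net_json(NETWORK_DATA_3, known_macs=KNOWN_MACS)
    # each 'tap' link becomes a {'type': 'physical', 'mac_address': ...}
    # entry in ncfg['config']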
test@do.co" DO_META = { - '': DO_INDEX, - 'user-data': '#!/bin/bash\necho "user-data"', - 'vendor-data': '#!/bin/bash\necho "vendor-data"', - 'public-keys': DO_SINGLE_KEY, + 'user_data': 'user_data_here', + 'vendor_data': 'vendor_data_here', + 'public_keys': DO_SINGLE_KEY, 'region': 'nyc3', 'id': '2000000', 'hostname': 'cloudinit-test', } -MD_URL_RE = re.compile(r'http://169.254.169.254/metadata/v1/.*') +MD_URL = 'http://169.254.169.254/metadata/v1.json' + + +def _mock_dmi(): + return (True, DO_META.get('id')) def _request_callback(method, uri, headers): - url_path = urlparse(uri).path - if url_path.startswith('/metadata/v1/'): - path = url_path.split('/metadata/v1/')[1:][0] - else: - path = None - if path in DO_META: - return (200, headers, DO_META.get(path)) - else: - return (404, headers, '') + return (200, headers, json.dumps(DO_META)) -class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): +class TestDataSourceDigitalOcean(HttprettyTestCase): + """ + Test reading the meta-data + """ def setUp(self): self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean( settings.CFG_BUILTIN, None, helpers.Paths({})) + self.ds._get_sysinfo = _mock_dmi super(TestDataSourceDigitalOcean, self).setUp() @httpretty.activate def test_connection(self): httpretty.register_uri( - httpretty.GET, MD_URL_RE, - body=_request_callback) + httpretty.GET, MD_URL, + body=json.dumps(DO_META)) success = self.ds.get_data() self.assertTrue(success) @@ -84,14 +74,14 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): @httpretty.activate def test_metadata(self): httpretty.register_uri( - httpretty.GET, MD_URL_RE, + httpretty.GET, MD_URL, body=_request_callback) self.ds.get_data() - self.assertEqual(DO_META.get('user-data'), + self.assertEqual(DO_META.get('user_data'), self.ds.get_userdata_raw()) - self.assertEqual(DO_META.get('vendor-data'), + self.assertEqual(DO_META.get('vendor_data'), self.ds.get_vendordata_raw()) self.assertEqual(DO_META.get('region'), @@ -103,11 +93,8 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): self.assertEqual(DO_META.get('hostname'), self.ds.get_hostname()) - self.assertEqual('http://mirrors.digitalocean.com/', - self.ds.get_package_mirror_info()) - # Single key - self.assertEqual([DO_META.get('public-keys')], + self.assertEqual([DO_META.get('public_keys')], self.ds.get_public_ssh_keys()) self.assertIsInstance(self.ds.get_public_ssh_keys(), list) @@ -116,12 +103,12 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): def test_multiple_ssh_keys(self): DO_META['public_keys'] = DO_MULTIPLE_KEYS httpretty.register_uri( - httpretty.GET, MD_URL_RE, + httpretty.GET, MD_URL, body=_request_callback) self.ds.get_data() # Multiple keys - self.assertEqual(DO_META.get('public-keys').splitlines(), + self.assertEqual(DO_META.get('public_keys'), self.ds.get_public_ssh_keys()) self.assertIsInstance(self.ds.get_public_ssh_keys(), list) diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index f66f1c6d..0126c883 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -2,6 +2,7 @@ from copy import copy import os import shutil import tempfile +import yaml from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper @@ -24,41 +25,44 @@ class TestMAASDataSource(TestCase): def test_seed_dir_valid(self): """Verify a valid seeddir is read as such.""" - data = {'instance-id': 'i-valid01', - 'local-hostname': 'valid01-hostname', - 
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index f66f1c6d..0126c883 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -2,6 +2,7 @@ from copy import copy
 import os
 import shutil
 import tempfile
+import yaml

 from cloudinit.sources import DataSourceMAAS
 from cloudinit import url_helper
@@ -24,41 +25,44 @@ class TestMAASDataSource(TestCase):

     def test_seed_dir_valid(self):
         """Verify a valid seed dir is read as such."""
-        data = {'instance-id': 'i-valid01',
-                'local-hostname': 'valid01-hostname',
-                'user-data': b'valid01-userdata',
+        userdata = b'valid01-userdata'
+        data = {'meta-data/instance-id': 'i-valid01',
+                'meta-data/local-hostname': 'valid01-hostname',
+                'user-data': userdata,
                 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}

         my_d = os.path.join(self.tmp, "valid")
         populate_dir(my_d, data)

-        (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d)
+        ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)

-        self.assertEqual(userdata, data['user-data'])
+        self.assertEqual(userdata, ud)
         for key in ('instance-id', 'local-hostname'):
-            self.assertEqual(data[key], metadata[key])
+            self.assertEqual(data["meta-data/" + key], md[key])

         # verify that 'userdata' is not returned as part of the metadata
-        self.assertFalse(('user-data' in metadata))
+        self.assertFalse(('user-data' in md))
+        self.assertEqual(vd, None)

     def test_seed_dir_valid_extra(self):
         """Verify extra files do not affect seed_dir validity."""
-        data = {'instance-id': 'i-valid-extra',
-                'local-hostname': 'valid-extra-hostname',
-                'user-data': b'valid-extra-userdata', 'foo': 'bar'}
+        userdata = b'valid-extra-userdata'
+        data = {'meta-data/instance-id': 'i-valid-extra',
+                'meta-data/local-hostname': 'valid-extra-hostname',
+                'user-data': userdata, 'foo': 'bar'}

         my_d = os.path.join(self.tmp, "valid_extra")
         populate_dir(my_d, data)

-        (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d)
+        ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d)

-        self.assertEqual(userdata, data['user-data'])
+        self.assertEqual(userdata, ud)
         for key in ('instance-id', 'local-hostname'):
-            self.assertEqual(data[key], metadata[key])
+            self.assertEqual(data['meta-data/' + key], md[key])

         # additional files should not just appear as keys in metadata atm
-        self.assertFalse(('foo' in metadata))
+        self.assertFalse(('foo' in md))

     def test_seed_dir_invalid(self):
         """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
@@ -97,67 +101,60 @@ class TestMAASDataSource(TestCase):
             DataSourceMAAS.read_maas_seed_dir,
             os.path.join(self.tmp, "nonexistantdirectory"))

+    def mock_read_maas_seed_url(self, data, seed, version="19991231"):
+        """Mock up readurl so it looks as if a web server at 'seed' has
+        provided 'data'; return what read_maas_seed_url returns."""
+        def my_readurl(*args, **kwargs):
+            if len(args):
+                url = args[0]
+            else:
+                url = kwargs['url']
+            prefix = "%s/%s/" % (seed, version)
+            if not url.startswith(prefix):
+                raise ValueError("unexpected call %s" % url)
+
+            short = url[len(prefix):]
+            if short not in data:
+                raise url_helper.UrlError("not found", code=404, url=url)
+            return url_helper.StringResponse(data[short])
+
+        # Now do the actual call of the code under test.
+        with mock.patch("cloudinit.url_helper.readurl") as mock_readurl:
+            mock_readurl.side_effect = my_readurl
+            return DataSourceMAAS.read_maas_seed_url(seed, version=version)
+
     def test_seed_url_valid(self):
         """Verify that valid seed_url is read as such."""
         valid = {
             'meta-data/instance-id': 'i-instanceid',
             'meta-data/local-hostname': 'test-hostname',
             'meta-data/public-keys': 'test-hostname',
+            'meta-data/vendor-data': b'my-vendordata',
             'user-data': b'foodata',
         }
-        valid_order = [
-            'meta-data/local-hostname',
-            'meta-data/instance-id',
-            'meta-data/public-keys',
-            'user-data',
-        ]
         my_seed = "http://example.com/xmeta"
         my_ver = "1999-99-99"
-        my_headers = {'header1': 'value1', 'header2': 'value2'}
-
-        def my_headers_cb(url):
-            return my_headers
-
-        # Each time url_helper.readurl() is called, something different is
-        # returned based on the canned data above.  We need to build up a list
-        # of side effect return values, which the mock will return.  At the
-        # same time, we'll build up a list of expected call arguments for
-        # asserting after the code under test is run.
-        calls = []
-
-        def side_effect():
-            for key in valid_order:
-                resp = valid.get(key)
-                url = "%s/%s/%s" % (my_seed, my_ver, key)
-                calls.append(
-                    mock.call(url, headers=None, timeout=mock.ANY,
-                              data=mock.ANY, sec_between=mock.ANY,
-                              ssl_details=mock.ANY, retries=mock.ANY,
-                              headers_cb=my_headers_cb,
-                              exception_cb=mock.ANY))
-                yield url_helper.StringResponse(resp)
-
-        # Now do the actual call of the code under test.
-        with mock.patch.object(url_helper, 'readurl',
-                               side_effect=side_effect()) as mockobj:
-            userdata, metadata = DataSourceMAAS.read_maas_seed_url(
-                my_seed, version=my_ver)
-
-        self.assertEqual(b"foodata", userdata)
-        self.assertEqual(metadata['instance-id'],
-                         valid['meta-data/instance-id'])
-        self.assertEqual(metadata['local-hostname'],
-                         valid['meta-data/local-hostname'])
-
-        mockobj.has_calls(calls)
-
-    def test_seed_url_invalid(self):
-        """Verify that invalid seed_url raises MAASSeedDirMalformed."""
-        pass
-
-    def test_seed_url_missing(self):
-        """Verify seed_url with no found entries raises MAASSeedDirNone."""
-        pass
+        ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver)
+
+        self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+        self.assertEqual(
+            valid['meta-data/local-hostname'], md['local-hostname'])
+        self.assertEqual(valid['meta-data/public-keys'], md['public-keys'])
+        self.assertEqual(valid['user-data'], ud)
+        # vendor-data is yaml, which decodes to a string
+        self.assertEqual(valid['meta-data/vendor-data'].decode(), vd)
+
+    def test_seed_url_vendor_data_dict(self):
+        expected_vd = {'key1': 'value1'}
+        valid = {
+            'meta-data/instance-id': 'i-instanceid',
+            'meta-data/local-hostname': 'test-hostname',
+            'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(),
+        }
+        ud, md, vd = self.mock_read_maas_seed_url(
+            valid, "http://example.com/foo")
+
+        self.assertEqual(valid['meta-data/instance-id'], md['instance-id'])
+        self.assertEqual(expected_vd, vd)

 # vi: ts=4 expandtab
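
Note: the two vendor-data assertions above differ because read_maas_seed_url() treats vendor-data as YAML: a bare string decodes to a string, while a dumped mapping comes back as a dict. A quick illustration:

    # Sketch: YAML decoding explains the two expected vendor-data types.
    import yaml

    yaml.safe_load('my-vendordata')                     # -> 'my-vendordata'
    yaml.safe_load(yaml.safe_dump({'key1': 'value1'}))  # -> {'key1': 'value1'}
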
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index b0fa1130..f6a46ce9 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -6,7 +6,7 @@ from ..helpers import TestCase, populate_dir, mock, ExitStack
 import os
 import shutil
 import tempfile
-
+import textwrap
 import yaml
@@ -129,6 +129,89 @@ class TestNoCloudDataSource(TestCase):
         self.assertFalse(dsrc.vendordata)
         self.assertTrue(ret)

+    def test_metadata_network_interfaces(self):
+        gateway = "103.225.10.1"
+        md = {
+            'instance-id': 'i-abcd',
+            'local-hostname': 'hostname1',
+            'network-interfaces': textwrap.dedent("""\
+                auto eth0
+                iface eth0 inet static
+                    hwaddr 00:16:3e:70:e1:04
+                    address 103.225.10.12
+                    netmask 255.255.255.0
+                    gateway """ + gateway + """
+                    dns-servers 8.8.8.8""")}
+
+        populate_dir(
+            os.path.join(self.paths.seed_dir, "nocloud"),
+            {'user-data': b"ud",
+             'meta-data': yaml.dump(md) + "\n"})
+
+        sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+
+        ds = DataSourceNoCloud.DataSourceNoCloud
+
+        dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        # very simple check just for the strings above
+        self.assertIn(gateway, str(dsrc.network_config))
+
+    def test_metadata_network_config(self):
+        # network-config needs to get into network_config
+        netconf = {'version': 1,
+                   'config': [{'type': 'physical', 'name': 'interface0',
+                               'subnets': [{'type': 'dhcp'}]}]}
+        populate_dir(
+            os.path.join(self.paths.seed_dir, "nocloud"),
+            {'user-data': b"ud",
+             'meta-data': "instance-id: IID\n",
+             'network-config': yaml.dump(netconf) + "\n"})
+
+        sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+
+        ds = DataSourceNoCloud.DataSourceNoCloud
+
+        dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(netconf, dsrc.network_config)
+
+    def test_metadata_network_config_over_interfaces(self):
+        # network-config should override meta-data/network-interfaces
+        gateway = "103.225.10.1"
+        md = {
+            'instance-id': 'i-abcd',
+            'local-hostname': 'hostname1',
+            'network-interfaces': textwrap.dedent("""\
+                auto eth0
+                iface eth0 inet static
+                    hwaddr 00:16:3e:70:e1:04
+                    address 103.225.10.12
+                    netmask 255.255.255.0
+                    gateway """ + gateway + """
+                    dns-servers 8.8.8.8""")}
+
+        netconf = {'version': 1,
+                   'config': [{'type': 'physical', 'name': 'interface0',
+                               'subnets': [{'type': 'dhcp'}]}]}
+        populate_dir(
+            os.path.join(self.paths.seed_dir, "nocloud"),
+            {'user-data': b"ud",
+             'meta-data': yaml.dump(md) + "\n",
+             'network-config': yaml.dump(netconf) + "\n"})
+
+        sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
+
+        ds = DataSourceNoCloud.DataSourceNoCloud
+
+        dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(netconf, dsrc.network_config)
+        self.assertNotIn(gateway, str(dsrc.network_config))
+

 class TestParseCommandLineData(TestCase):
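
Note: the new NoCloud tests build their seeds with populate_dir(); outside the test suite the same seed is just files side by side. A hedged sketch of the layout test_metadata_network_config expects (the directory path is illustrative):

    # Sketch: a NoCloud seed directory equivalent to what the test builds.
    import os
    import yaml

    netconf = {'version': 1,
               'config': [{'type': 'physical', 'name': 'interface0',
                           'subnets': [{'type': 'dhcp'}]}]}
    seed_d = '/tmp/seed/nocloud'  # illustrative location
    os.makedirs(seed_d)
    with open(os.path.join(seed_d, 'user-data'), 'wb') as f:
        f.write(b'ud')
    with open(os.path.join(seed_d, 'meta-data'), 'w') as f:
        f.write('instance-id: IID\n')
    with open(os.path.join(seed_d, 'network-config'), 'w') as f:
        f.write(yaml.dump(netconf))
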
"netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_DHCP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "dhcp" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24", + "8.12.42.52/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24", + "10.210.1.151/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64", + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_IPV4_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": ["8.12.42.1", "2001::1", "2001::2"], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64", + "8.12.42.52/32"], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": ["10.210.1.217/24"], + "gateways": ["10.210.1.210"], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_SINGLE_GATEWAY = json.loads(""" +[ + { + "interface":"net0", + "mac":"90:b8:d0:d8:82:b4", + "vlan_id":324, + "nic_tag":"external", + "gateway":"8.12.42.1", + "gateways":["8.12.42.1"], + "netmask":"255.255.255.0", + "ip":"8.12.42.26", + "ips":["8.12.42.26/24"], + "network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model":"virtio", + "mtu":1500, + "primary":true + }, + { + 
"interface":"net1", + "mac":"90:b8:d0:0a:51:31", + "vlan_id":600, + "nic_tag":"internal", + "netmask":"255.255.255.0", + "ip":"10.210.1.27", + "ips":["10.210.1.27/24"], + "network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model":"virtio", + "mtu":1500 + } +] +""") + + MOCK_RETURNS = { 'hostname': 'test-host', 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', @@ -524,20 +749,135 @@ class TestJoyentMetadataClient(FilesystemMockingTestCase): class TestNetworkConversion(TestCase): - def test_convert_simple(self): expected = { 'version': 1, 'config': [ {'name': 'net0', 'type': 'physical', 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'netmask': '255.255.255.0', 'address': '8.12.42.102/24'}], 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'}, {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '192.168.128.1', - 'netmask': '255.255.252.0', + 'subnets': [{'type': 'static', 'address': '192.168.128.93/22'}], 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]} - found = DataSourceSmartOS.convert_smartos_network_data(SDC_NICS) + found = convert_net(SDC_NICS) + self.assertEqual(expected, found) + + def test_convert_simple_alt(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_ALT) + self.assertEqual(expected, found) + + def test_convert_simple_dhcp(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_DHCP) + self.assertEqual(expected, found) + + def test_convert_simple_multi_ip(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}, + {'type': 'static', + 'address': '8.12.42.52/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}, + {'type': 'static', + 'address': '10.210.1.151/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_MIP) + self.assertEqual(expected, found) + + def test_convert_with_dns(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}, + {'type': 'nameserver', + 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]} + found = convert_net( + network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'], + dns_domain="local") + self.assertEqual(expected, found) + + def test_convert_simple_multi_ipv6(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'address': + 
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py
index 8fa0147a..c3a5a634 100644
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ b/tests/unittests/test_handler/test_handler_mcollective.py
@@ -138,6 +138,7 @@ class TestHandler(t_help.TestCase):
     def test_mcollective_install(self, mock_util):
         cc = self._get_cloud('ubuntu')
         cc.distro = t_help.mock.MagicMock()
+        mock_util.load_file.return_value = b""
         mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}}
         cc_mcollective.handle('cc_mcollective', mycfg, cc, LOG, [])
         self.assertTrue(cc.distro.install_packages.called)
diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py
new file mode 100644
index 00000000..1c7bb06a
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_ntp.py
@@ -0,0 +1,274 @@
+from cloudinit.config import cc_ntp
+from cloudinit.sources import DataSourceNone
+from cloudinit import templater
+from cloudinit import (distros, helpers, cloud, util)
+from ..helpers import FilesystemMockingTestCase, mock
+
+import logging
+import os
+import shutil
+import tempfile
+
+LOG = logging.getLogger(__name__)
+
+NTP_TEMPLATE = """
+## template: jinja
+
+{% if pools %}# pools
+{% endif %}
+{% for pool in pools -%}
+pool {{pool}} iburst
+{% endfor %}
+{%- if servers %}# servers
+{% endif %}
+{% for server in servers -%}
+server {{server}} iburst
+{% endfor %}
+
+"""
+
+
+NTP_EXPECTED_UBUNTU = """
+# pools
+pool 0.mycompany.pool.ntp.org iburst
+# servers
+server 192.168.23.3 iburst
+
+"""
+
+
+class TestNtp(FilesystemMockingTestCase):
+
+    def setUp(self):
+        super(TestNtp, self).setUp()
+        self.subp = util.subp
+        self.new_root = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.new_root)
+
+    def _get_cloud(self, distro, metadata=None):
+        self.patchUtils(self.new_root)
+        paths = helpers.Paths({})
+        cls = distros.fetch(distro)
+        mydist = cls(distro, {}, paths)
+        myds = DataSourceNone.DataSourceNone({}, mydist, paths)
+        if metadata:
+            myds.metadata.update(metadata)
+        return cloud.Cloud(myds, paths, {}, mydist, None)
+
+    @mock.patch("cloudinit.config.cc_ntp.util")
+    def test_ntp_install(self, mock_util):
+        cc = self._get_cloud('ubuntu')
+        cc.distro = mock.MagicMock()
+        cc.distro.name = 'ubuntu'
+        mock_util.which.return_value = None
+        install_func = mock.MagicMock()
+
+        cc_ntp.install_ntp(install_func, packages=['ntpx'], check_exe='ntpdx')
+
+        self.assertTrue(install_func.called)
+        mock_util.which.assert_called_with('ntpdx')
+        install_pkg = install_func.call_args_list[0][0][0]
+        self.assertEqual(sorted(install_pkg), ['ntpx'])
+
+    @mock.patch("cloudinit.config.cc_ntp.util")
+    def test_ntp_install_not_needed(self, mock_util):
+        cc = self._get_cloud('ubuntu')
+        cc.distro = mock.MagicMock()
+        cc.distro.name = 'ubuntu'
+        mock_util.which.return_value = ["/usr/sbin/ntpd"]
+        cc_ntp.install_ntp(cc)
+        self.assertFalse(cc.distro.install_packages.called)
+
+    def test_ntp_rename_ntp_conf(self):
+        with mock.patch.object(os.path, 'exists',
+                               return_value=True) as mockpath:
+            with mock.patch.object(util, 'rename') as mockrename:
+                cc_ntp.rename_ntp_conf()
+
+        mockpath.assert_called_with('/etc/ntp.conf')
+        mockrename.assert_called_with('/etc/ntp.conf', '/etc/ntp.conf.dist')
+
+    def test_ntp_rename_ntp_conf_skip_missing(self):
+        with mock.patch.object(os.path, 'exists',
+                               return_value=False) as mockpath:
+            with mock.patch.object(util, 'rename') as mockrename:
+                cc_ntp.rename_ntp_conf()
+
+        mockpath.assert_called_with('/etc/ntp.conf')
+        mockrename.assert_not_called()
+
+    def ntp_conf_render(self, distro):
+        """Test rendering of an ntp.conf from template for a given distro."""
+
+        cfg = {'ntp': {}}
+        mycloud = self._get_cloud(distro)
+        distro_names = cc_ntp.generate_server_names(distro)
+
+        with mock.patch.object(templater, 'render_to_file') as mocktmpl:
+            with mock.patch.object(os.path, 'isfile', return_value=True):
+                with mock.patch.object(util, 'rename'):
+                    cc_ntp.write_ntp_config_template(cfg, mycloud)
+
+        mocktmpl.assert_called_once_with(
+            ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro),
+            '/etc/ntp.conf',
+            {'servers': [], 'pools': distro_names})
+
+    def test_ntp_conf_render_rhel(self):
+        """Test templater.render_to_file() for rhel"""
+        self.ntp_conf_render('rhel')
+
+    def test_ntp_conf_render_debian(self):
+        """Test templater.render_to_file() for debian"""
+        self.ntp_conf_render('debian')
+
+    def test_ntp_conf_render_fedora(self):
+        """Test templater.render_to_file() for fedora"""
+        self.ntp_conf_render('fedora')
+
+    def test_ntp_conf_render_sles(self):
+        """Test templater.render_to_file() for sles"""
+        self.ntp_conf_render('sles')
+
+    def test_ntp_conf_render_ubuntu(self):
+        """Test templater.render_to_file() for ubuntu"""
+        self.ntp_conf_render('ubuntu')
+
+    def test_ntp_conf_servers_no_pools(self):
+        distro = 'ubuntu'
+        pools = []
+        servers = ['192.168.2.1']
+        cfg = {
+            'ntp': {
+                'pools': pools,
+                'servers': servers,
+            }
+        }
+        mycloud = self._get_cloud(distro)
+
+        with mock.patch.object(templater, 'render_to_file') as mocktmpl:
+            with mock.patch.object(os.path, 'isfile', return_value=True):
+                with mock.patch.object(util, 'rename'):
+                    cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud)
+
+        mocktmpl.assert_called_once_with(
+            ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro),
+            '/etc/ntp.conf',
+            {'servers': servers, 'pools': pools})
+
+    def test_ntp_conf_custom_pools_no_server(self):
+        distro = 'ubuntu'
+        pools = ['0.mycompany.pool.ntp.org']
+        servers = []
+        cfg = {
+            'ntp': {
+                'pools': pools,
+                'servers': servers,
+            }
+        }
+        mycloud = self._get_cloud(distro)
+
+        with mock.patch.object(templater, 'render_to_file') as mocktmpl:
+            with mock.patch.object(os.path, 'isfile', return_value=True):
+                with mock.patch.object(util, 'rename'):
+                    cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud)
+
+        mocktmpl.assert_called_once_with(
+            ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro),
+            '/etc/ntp.conf',
+            {'servers': servers, 'pools': pools})
+
+    def test_ntp_conf_custom_pools_and_server(self):
+        distro = 'ubuntu'
+        pools = ['0.mycompany.pool.ntp.org']
+        servers = ['192.168.23.3']
+        cfg = {
+            'ntp': {
+                'pools': pools,
+                'servers': servers,
+            }
+        }
+        mycloud = self._get_cloud(distro)
+
+        with mock.patch.object(templater, 'render_to_file') as mocktmpl:
+            with mock.patch.object(os.path, 'isfile', return_value=True):
+                with mock.patch.object(util, 'rename'):
+                    cc_ntp.write_ntp_config_template(cfg.get('ntp'), mycloud)
+
+        mocktmpl.assert_called_once_with(
+            ('/etc/cloud/templates/ntp.conf.%s.tmpl' % distro),
+            '/etc/ntp.conf',
+            {'servers': servers, 'pools': pools})
+
+    def test_ntp_conf_contents_match(self):
+        """Test rendered contents of /etc/ntp.conf for ubuntu"""
+        pools = ['0.mycompany.pool.ntp.org']
+        servers = ['192.168.23.3']
+        cfg = {
+            'ntp': {
+                'pools': pools,
+                'servers': servers,
+            }
+        }
+        mycloud = self._get_cloud('ubuntu')
+        side_effect = [NTP_TEMPLATE.lstrip()]
+
+        # work backwards from util.write_file and mock out call path
+        # write_ntp_config_template()
+        #   cloud.get_template_filename()
+        #     os.path.isfile()
+        #   templater.render_to_file()
+        #     templater.render_from_file()
+        #       util.load_file()
+        #   util.write_file()
+        #
+        with mock.patch.object(util, 'write_file') as mockwrite:
+            with mock.patch.object(util, 'load_file', side_effect=side_effect):
+                with mock.patch.object(os.path, 'isfile', return_value=True):
+                    with mock.patch.object(util, 'rename'):
+                        cc_ntp.write_ntp_config_template(cfg.get('ntp'),
+                                                         mycloud)
+
+        mockwrite.assert_called_once_with(
+            '/etc/ntp.conf',
+            NTP_EXPECTED_UBUNTU,
+            mode=420)
+
+    def test_ntp_handler(self):
+        """Test ntp handler renders ubuntu ntp.conf template"""
+        pools = ['0.mycompany.pool.ntp.org']
+        servers = ['192.168.23.3']
+        cfg = {
+            'ntp': {
+                'pools': pools,
+                'servers': servers,
+            }
+        }
+        mycloud = self._get_cloud('ubuntu')
+        side_effect = [NTP_TEMPLATE.lstrip()]
+
+        with mock.patch.object(util, 'which', return_value=None):
+            with mock.patch.object(os.path, 'exists'):
+                with mock.patch.object(util, 'write_file') as mockwrite:
+                    with mock.patch.object(util, 'load_file',
+                                           side_effect=side_effect):
+                        with mock.patch.object(os.path, 'isfile',
+                                               return_value=True):
+                            with mock.patch.object(util, 'rename'):
+                                cc_ntp.handle("notimportant", cfg,
+                                              mycloud, LOG, None)
+
+        mockwrite.assert_called_once_with(
+            '/etc/ntp.conf',
+            NTP_EXPECTED_UBUNTU,
+            mode=420)
+
+    @mock.patch("cloudinit.config.cc_ntp.util")
+    def test_no_ntpcfg_does_nothing(self, mock_util):
+        cc = self._get_cloud('ubuntu')
+        cc.distro = mock.MagicMock()
+        cc_ntp.handle('cc_ntp', {}, cc, LOG, [])
+        self.assertFalse(cc.distro.install_packages.called)
+        self.assertFalse(mock_util.subp.called)
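
Note: the handler tests feed cc_ntp the same dict shape a user would put in cloud-config. The input below and the rendered output are lifted straight from the fixtures above:

    # Sketch: the 'ntp' config these tests pass to cc_ntp.handle(), and the
    # ntp.conf body the jinja template renders from it (NTP_EXPECTED_UBUNTU).
    cfg = {
        'ntp': {
            'pools': ['0.mycompany.pool.ntp.org'],
            'servers': ['192.168.23.3'],
        }
    }
    # Rendering NTP_TEMPLATE with this cfg produces:
    #   # pools
    #   pool 0.mycompany.pool.ntp.org iburst
    #   # servers
    #   server 192.168.23.3 iburst
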
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 37a984ac..73369cd3 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -371,8 +371,30 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
         self._create_sysfs_parent_directory()
         expected_dmi_value = 'dmidecode-used'
         self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
-        self.assertEqual(expected_dmi_value,
-                         util.read_dmi_data('use-dmidecode'))
+        with mock.patch("cloudinit.util.os.uname") as m_uname:
+            m_uname.return_value = ('x-sysname', 'x-nodename',
+                                    'x-release', 'x-version', 'x86_64')
+            self.assertEqual(expected_dmi_value,
+                             util.read_dmi_data('use-dmidecode'))
+
+    def test_dmidecode_not_used_on_arm(self):
+        self.patch_mapping({})
+        self._create_sysfs_parent_directory()
+        dmi_val = 'from-dmidecode'
+        dmi_name = 'use-dmidecode'
+        self._configure_dmidecode_return(dmi_name, dmi_val)
+
+        expected = {'armel': None, 'aarch64': None, 'x86_64': dmi_val}
+        found = {}
+        # we do not run the 'dmidecode' binary on some arches
+        # verify that anything requested that is not in the sysfs dir
+        # will return None on those arches.
+        with mock.patch("cloudinit.util.os.uname") as m_uname:
+            for arch in expected:
+                m_uname.return_value = ('x-sysname', 'x-nodename',
+                                        'x-release', 'x-version', arch)
+                found[arch] = util.read_dmi_data(dmi_name)
+        self.assertEqual(expected, found)

     def test_none_returned_if_neither_source_has_data(self):
         self.patch_mapping({})
diff --git a/tools/read-version b/tools/read-version
index 5ecf7247..c10f9b46 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -71,8 +71,8 @@ else:
     version_long = None

 # version is X.Y.Z[+xxx.gHASH]
-# version_long is None or X.Y.Z+xxx.gHASH
-release = version.partition("+")[0]
+# version_long is None or X.Y.Z-xxx-gHASH
+release = version.partition("-")[0]
 extra = None
 commit = None
 distance = None
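
Note: the read-version change follows version_long's switch from X.Y.Z+xxx.gHASH to the git-describe style X.Y.Z-xxx-gHASH, so the release is now whatever precedes the first '-'. For example (version string illustrative):

    # Sketch: effect of partitioning on '-' instead of '+'.
    version = '0.7.8-10-gabcdef0'
    release = version.partition('-')[0]  # -> '0.7.8'
    # the old partition('+') would have returned the whole string unchanged
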