author     Scott Moser <smoser@brickies.net>    2016-08-12 17:05:04 -0400
committer  Scott Moser <smoser@brickies.net>    2016-08-12 17:05:04 -0400
commit     bf1728902bd3e81e00aa9786a5b1c67e4f30a659 (patch)
tree       857c914003fda2ec5223425a31b646cbafd9907b /cloudinit
parent     e28ba310872846e0bc60595aed353c17b760fdcb (diff)
parent     bc2c3267549b9067c017a34e22bbee18890aec06 (diff)
Merge branch 'master' into ubuntu/devel
Diffstat (limited to 'cloudinit')
 cloudinit/config/cc_lxd.py                  |    2
 cloudinit/config/cc_mcollective.py          |   38
 cloudinit/config/cc_ntp.py                  |  106
 cloudinit/config/cc_rh_subscription.py      |    2
 cloudinit/config/cc_snappy.py               |    2
 cloudinit/config/cc_ubuntu_init_switch.py   |    2
 cloudinit/config/cc_yum_add_repo.py         |    2
 cloudinit/sources/DataSourceAltCloud.py     |    6
 cloudinit/sources/DataSourceCloudSigma.py   |    6
 cloudinit/sources/DataSourceConfigDrive.py  |    2
 cloudinit/sources/DataSourceDigitalOcean.py |  106
 cloudinit/sources/DataSourceMAAS.py         |  199
 cloudinit/sources/DataSourceNoCloud.py      |    2
 cloudinit/sources/DataSourceOpenStack.py    |    2
 cloudinit/sources/DataSourceSmartOS.py      |  122
 cloudinit/sources/__init__.py               |   27
 cloudinit/sources/helpers/openstack.py      |   27
 cloudinit/util.py                           |    7
18 files changed, 445 insertions, 215 deletions
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 70d4e7c3..0086840f 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,6 +47,8 @@ Example config:
 
 from cloudinit import util
 
+distros = ['ubuntu']
+
 
 def handle(name, cfg, cloud, log, args):
     # Get config
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index ada535f8..b3089f30 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -19,6 +19,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import errno
+
 import six
 from six import BytesIO
 
@@ -38,16 +40,18 @@ LOG = logging.getLogger(__name__)
 
 def configure(config, server_cfg=SERVER_CFG,
               pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE):
-    # Read server.cfg values from the
-    # original file in order to be able to mix the rest up
+    # Read server.cfg (if it exists) values from the
+    # original file in order to be able to mix the rest up.
     try:
-        mcollective_config = ConfigObj(server_cfg, file_error=True)
-        existed = True
-    except IOError:
-        LOG.debug("Did not find file %s", server_cfg)
-        mcollective_config = ConfigObj()
-        existed = False
-
+        old_contents = util.load_file(server_cfg, quiet=False, decode=False)
+        mcollective_config = ConfigObj(BytesIO(old_contents))
+    except IOError as e:
+        if e.errno != errno.ENOENT:
+            raise
+        else:
+            LOG.debug("Did not find file %s (starting with an empty"
+                      " config)", server_cfg)
+            mcollective_config = ConfigObj()
     for (cfg_name, cfg) in config.items():
         if cfg_name == 'public-cert':
             util.write_file(pubcert_file, cfg, mode=0o644)
@@ -74,12 +78,18 @@ def configure(config, server_cfg=SERVER_CFG,
             # Otherwise just try to convert it to a string
             mcollective_config[cfg_name] = str(cfg)
 
-    if existed:
-        # We got all our config as wanted we'll rename
-        # the previous server.cfg and create our new one
-        util.rename(server_cfg, "%s.old" % (server_cfg))
+    try:
+        # We got all our config as wanted we'll copy
+        # the previous server.cfg and overwrite the old with our new one
+        util.copy(server_cfg, "%s.old" % (server_cfg))
+    except IOError as e:
+        if e.errno == errno.ENOENT:
+            # Doesn't exist to copy...
+            pass
+        else:
+            raise
 
-    # Now we got the whole file, write to disk...
+    # Now we got the whole (new) file, write to disk...
     contents = BytesIO()
     mcollective_config.write(contents)
     util.write_file(server_cfg, contents.getvalue(), mode=0o644)
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
new file mode 100644
index 00000000..ad69aa34
--- /dev/null
+++ b/cloudinit/config/cc_ntp.py
@@ -0,0 +1,106 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import templater
+from cloudinit import type_utils
+from cloudinit import util
+
+import os
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+NTP_CONF = '/etc/ntp.conf'
+NR_POOL_SERVERS = 4
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
+
+
+def handle(name, cfg, cloud, log, _args):
+    """
+    Enable and configure ntp
+
+    ntp:
+       pools: ['0.{{distro}}.pool.ntp.org', '1.{{distro}}.pool.ntp.org']
+       servers: ['192.168.2.1']
+
+    """
+
+    ntp_cfg = cfg.get('ntp', {})
+
+    if not isinstance(ntp_cfg, (dict)):
+        raise RuntimeError(("'ntp' key existed in config,"
+                            " but not a dictionary type,"
+                            " is a %s %instead"), type_utils.obj_name(ntp_cfg))
+
+    if 'ntp' not in cfg:
+        LOG.debug("Skipping module named %s,"
+                  "not present or disabled by cfg", name)
+        return True
+
+    install_ntp(cloud.distro.install_packages, packages=['ntp'],
+                check_exe="ntpd")
+    rename_ntp_conf()
+    write_ntp_config_template(ntp_cfg, cloud)
+
+
+def install_ntp(install_func, packages=None, check_exe="ntpd"):
+    if util.which(check_exe):
+        return
+    if packages is None:
+        packages = ['ntp']
+
+    install_func(packages)
+
+
+def rename_ntp_conf(config=NTP_CONF):
+    if os.path.exists(config):
+        util.rename(config, config + ".dist")
+
+
+def generate_server_names(distro):
+    names = []
+    for x in range(0, NR_POOL_SERVERS):
+        name = "%d.%s.pool.ntp.org" % (x, distro)
+        names.append(name)
+    return names
+
+
+def write_ntp_config_template(cfg, cloud):
+    servers = cfg.get('servers', [])
+    pools = cfg.get('pools', [])
+
+    if len(servers) == 0 and len(pools) == 0:
+        LOG.debug('Adding distro default ntp pool servers')
+        pools = generate_server_names(cloud.distro.name)
+
+    params = {
+        'servers': servers,
+        'pools': pools,
+    }
+
+    template_fn = cloud.get_template_filename('ntp.conf.%s' %
+                                              (cloud.distro.name))
+    if not template_fn:
+        template_fn = cloud.get_template_filename('ntp.conf')
+        if not template_fn:
+            raise RuntimeError(("No template found, "
+                                "not rendering %s"), NTP_CONF)
+
+    templater.render_to_file(template_fn, NTP_CONF, params)
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 3a113aea..d4ad724a 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -18,6 +18,8 @@
 
 from cloudinit import util
 
+distros = ['fedora', 'rhel']
+
 
 def handle(name, cfg, _cloud, log, _args):
     sm = SubscriptionManager(cfg)
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 1a485ee6..6bcd8382 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -68,6 +68,8 @@ BUILTIN_CFG = {
     'config': {},
 }
 
+distros = ['ubuntu']
+
 
 def parse_filename(fname):
     fname = os.path.basename(fname)
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
index 884d79f1..bffb4380 100644
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ b/cloudinit/config/cc_ubuntu_init_switch.py
@@ -86,6 +86,8 @@ else
 fi
 """
 
+distros = ['ubuntu']
+
 
 def handle(name, cfg, cloud, log, args):
     """Handler method activated by cloud-init."""
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 64fba869..22549e62 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -23,6 +23,8 @@ import six
 
 from cloudinit import util
 
+distros = ['fedora', 'rhel']
+
 
 def _canonicalize_id(repo_id):
     repo_id = repo_id.lower().replace("-", "_")
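[Editor's note: the following sketch is illustration only, not part of the commit.] The new cc_ntp module falls back to distro-default pool names when the user supplies neither 'servers' nor 'pools'. A minimal standalone sketch of that expansion, with the constant and logic copied from the patch above:

    # Standalone sketch of cc_ntp's default pool expansion (mirrors the
    # patch above; not itself part of the commit).
    NR_POOL_SERVERS = 4

    def generate_server_names(distro):
        # 'ubuntu' -> ['0.ubuntu.pool.ntp.org', ..., '3.ubuntu.pool.ntp.org']
        return ["%d.%s.pool.ntp.org" % (x, distro)
                for x in range(0, NR_POOL_SERVERS)]

    print(generate_server_names('ubuntu'))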
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index a3529609..48136f7c 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -110,12 +110,6 @@ class DataSourceAltCloud(sources.DataSource):
 
         '''
 
-        uname_arch = os.uname()[4]
-        if uname_arch.startswith("arm") or uname_arch == "aarch64":
-            # Disabling because dmi data is not available on ARM processors
-            LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
-            return 'UNKNOWN'
-
         system_name = util.read_dmi_data("system-product-name")
         if not system_name:
             return 'UNKNOWN'
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index d1f806d6..be74503b 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -16,7 +16,6 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 from base64 import b64decode
-import os
 import re
 
 from cloudinit.cs_utils import Cepko
@@ -45,11 +44,6 @@ class DataSourceCloudSigma(sources.DataSource):
         Uses dmi data to detect if this instance of cloud-init is running
         in the CloudSigma's infrastructure.
         """
-        uname_arch = os.uname()[4]
-        if uname_arch.startswith("arm") or uname_arch == "aarch64":
-            # Disabling because dmi data on ARM processors
-            LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
-            return False
 
         LOG.debug("determining hypervisor product name via dmi data")
         sys_product_name = util.read_dmi_data("system-product-name")
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 91d6ff13..5c9edabe 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -134,7 +134,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
         vd = results.get('vendordata')
         self.vendordata_pure = vd
         try:
-            self.vendordata_raw = openstack.convert_vendordata_json(vd)
+            self.vendordata_raw = sources.convert_vendordata(vd)
         except ValueError as e:
             LOG.warn("Invalid content in vendor-data: %s", e)
             self.vendordata_raw = None
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 44a17a00..fc596e17 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -1,6 +1,7 @@
 # vi: ts=4 expandtab
 #
 # Author: Neal Shrader <neal@digitalocean.com>
+# Author: Ben Howard <bh@digitalocean.com>
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 3, as
@@ -14,22 +15,27 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from cloudinit import ec2_utils
+# DigitalOcean Droplet API:
+# https://developers.digitalocean.com/documentation/metadata/
+
+import json
+
 from cloudinit import log as logging
 from cloudinit import sources
+from cloudinit import url_helper
 from cloudinit import util
 
-import functools
-
-
 LOG = logging.getLogger(__name__)
 
 BUILTIN_DS_CONFIG = {
-    'metadata_url': 'http://169.254.169.254/metadata/v1/',
-    'mirrors_url': 'http://mirrors.digitalocean.com/'
+    'metadata_url': 'http://169.254.169.254/metadata/v1.json',
 }
-MD_RETRIES = 0
-MD_TIMEOUT = 1
+
+# Wait for a up to a minute, retrying the meta-data server
+# every 2 seconds.
+MD_RETRIES = 30
+MD_TIMEOUT = 2
+MD_WAIT_RETRY = 2
 
 
 class DataSourceDigitalOcean(sources.DataSource):
@@ -40,43 +46,61 @@ class DataSourceDigitalOcean(sources.DataSource):
             util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
             BUILTIN_DS_CONFIG])
         self.metadata_address = self.ds_cfg['metadata_url']
+        self.retries = self.ds_cfg.get('retries', MD_RETRIES)
+        self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
+        self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
 
-        if self.ds_cfg.get('retries'):
-            self.retries = self.ds_cfg['retries']
-        else:
-            self.retries = MD_RETRIES
+    def _get_sysinfo(self):
+        # DigitalOcean embeds vendor ID and instance/droplet_id in the
+        # SMBIOS information
 
-        if self.ds_cfg.get('timeout'):
-            self.timeout = self.ds_cfg['timeout']
-        else:
-            self.timeout = MD_TIMEOUT
+        LOG.debug("checking if instance is a DigitalOcean droplet")
+
+        # Detect if we are on DigitalOcean and return the Droplet's ID
+        vendor_name = util.read_dmi_data("system-manufacturer")
+        if vendor_name != "DigitalOcean":
+            return (False, None)
 
-    def get_data(self):
-        caller = functools.partial(util.read_file_or_url,
-                                   timeout=self.timeout, retries=self.retries)
+        LOG.info("running on DigitalOcean")
 
-        def mcaller(url):
-            return caller(url).contents
+        droplet_id = util.read_dmi_data("system-serial-number")
+        if droplet_id:
+            LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet"
+                       "{}").format(droplet_id))
+        else:
+            LOG.critical(("system identified via SMBIOS as a DigitalOcean "
+                          "Droplet, but did not provide an ID. Please file a "
+                          "support ticket at: "
+                          "https://cloud.digitalocean.com/support/tickets/"
+                          "new"))
 
-        md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
-                                            base_url=self.metadata_address,
-                                            caller=mcaller)
+        return (True, droplet_id)
 
-        self.metadata = md.materialize()
+    def get_data(self, apply_filter=False):
+        (is_do, droplet_id) = self._get_sysinfo()
 
-        if self.metadata.get('id'):
-            return True
-        else:
+        # only proceed if we know we are on DigitalOcean
+        if not is_do:
             return False
 
-    def get_userdata_raw(self):
-        return "\n".join(self.metadata['user-data'])
+        LOG.debug("reading metadata from {}".format(self.metadata_address))
+        response = url_helper.readurl(self.metadata_address,
+                                      timeout=self.timeout,
+                                      sec_between=self.wait_retry,
+                                      retries=self.retries)
 
-    def get_vendordata_raw(self):
-        return "\n".join(self.metadata['vendor-data'])
+        contents = util.decode_binary(response.contents)
+        decoded = json.loads(contents)
+
+        self.metadata = decoded
+        self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id)
+        self.metadata['local-hostname'] = decoded.get('hostname', droplet_id)
+        self.vendordata_raw = decoded.get("vendor_data", None)
+        self.userdata_raw = decoded.get("user_data", None)
+        return True
 
     def get_public_ssh_keys(self):
-        public_keys = self.metadata['public-keys']
+        public_keys = self.metadata.get('public_keys', [])
         if isinstance(public_keys, list):
             return public_keys
         else:
@@ -84,21 +108,17 @@ class DataSourceDigitalOcean(sources.DataSource):
 
     @property
     def availability_zone(self):
-        return self.metadata['region']
-
-    def get_instance_id(self):
-        return self.metadata['id']
-
-    def get_hostname(self, fqdn=False, resolve_ip=False):
-        return self.metadata['hostname']
-
-    def get_package_mirror_info(self):
-        return self.ds_cfg['mirrors_url']
+        return self.metadata.get('region', 'default')
 
     @property
     def launch_index(self):
         return None
 
+    def check_instance_id(self, sys_cfg):
+        return sources.instance_id_matches_system_uuid(
+            self.get_instance_id(), 'system-serial-number')
+
+
 # Used to match classes to dependencies
 datasources = [
     (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
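[Editor's note: illustration only, not part of the commit.] The reworked DigitalOcean datasource first gates on the SMBIOS vendor string and then reads one JSON document, instead of crawling an EC2-style metadata tree. A rough stdlib-only sketch of that flow (the real code goes through util.read_dmi_data() and url_helper.readurl()):

    import json
    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2

    MD_URL = 'http://169.254.169.254/metadata/v1.json'

    def read_droplet_metadata(url=MD_URL, timeout=2):
        # A single GET returns the whole metadata document as JSON.
        decoded = json.loads(urlopen(url, timeout=timeout).read().decode())
        return {'instance-id': decoded.get('droplet_id'),
                'local-hostname': decoded.get('hostname'),
                'user-data': decoded.get('user_data'),
                'vendor-data': decoded.get('vendor_data')}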
+ ("meta-data/instance-id", 'meta-data/instance-id', False, False), + ("meta-data/local-hostname", 'meta-data/local-hostname', False, False), + ("meta-data/public-keys", 'meta-data/public-keys', False, True), + ('meta-data/vendor-data', 'vendor-data', True, True), + ('user-data', 'user-data', True, True), +] class DataSourceMAAS(sources.DataSource): @@ -43,6 +49,7 @@ class DataSourceMAAS(sources.DataSource): instance-id user-data hostname + vendor-data """ def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -71,10 +78,7 @@ class DataSourceMAAS(sources.DataSource): mcfg = self.ds_cfg try: - (userdata, metadata) = read_maas_seed_dir(self.seed_dir) - self.userdata_raw = userdata - self.metadata = metadata - self.base_url = self.seed_dir + self._set_data(self.seed_dir, read_maas_seed_dir(self.seed_dir)) return True except MAASSeedDirNone: pass @@ -95,18 +99,29 @@ class DataSourceMAAS(sources.DataSource): if not self.wait_for_metadata_service(url): return False - self.base_url = url - - (userdata, metadata) = read_maas_seed_url( - self.base_url, read_file_or_url=self.oauth_helper.readurl, - paths=self.paths, retries=1) - self.userdata_raw = userdata - self.metadata = metadata + self._set_data( + url, read_maas_seed_url( + url, read_file_or_url=self.oauth_helper.readurl, + paths=self.paths, retries=1)) return True except Exception: util.logexc(LOG, "Failed fetching metadata from url %s", url) return False + def _set_data(self, url, data): + # takes a url for base_url and a tuple of userdata, metadata, vd. + self.base_url = url + ud, md, vd = data + self.userdata_raw = ud + self.metadata = md + self.vendordata_pure = vd + if vd: + try: + self.vendordata_raw = sources.convert_vendordata(vd) + except ValueError as e: + LOG.warn("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + def wait_for_metadata_service(self, url): mcfg = self.ds_cfg max_wait = 120 @@ -126,6 +141,8 @@ class DataSourceMAAS(sources.DataSource): LOG.warn("Failed to get timeout, using %s" % timeout) starttime = time.time() + if url.endswith("/"): + url = url[:-1] check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] url = self.oauth_helper.wait_for_url( @@ -141,27 +158,13 @@ class DataSourceMAAS(sources.DataSource): def read_maas_seed_dir(seed_d): - """ - Return user-data and metadata for a maas seed dir in seed_d. - Expected format of seed_d are the following files: - * instance-id - * local-hostname - * user-data - """ - if not os.path.isdir(seed_d): + if seed_d.startswith("file://"): + seed_d = seed_d[7:] + if not os.path.isdir(seed_d) or len(os.listdir(seed_d)) == 0: raise MAASSeedDirNone("%s: not a directory") - files = ('local-hostname', 'instance-id', 'user-data', 'public-keys') - md = {} - for fname in files: - try: - md[fname] = util.load_file(os.path.join(seed_d, fname), - decode=fname not in BINARY_FIELDS) - except IOError as e: - if e.errno != errno.ENOENT: - raise - - return check_seed_contents(md, seed_d) + # seed_dir looks in seed_dir, not seed_dir/VERSION + return read_maas_seed_url("file://%s" % seed_d, version=None) def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, @@ -175,73 +178,78 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, * <seed_url>/<version>/meta-data/instance-id * <seed_url>/<version>/meta-data/local-hostname * <seed_url>/<version>/user-data + If version is None, then <version>/ will not be used. 
""" - base_url = "%s/%s" % (seed_url, version) - file_order = [ - 'local-hostname', - 'instance-id', - 'public-keys', - 'user-data', - ] - files = { - 'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'), - 'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'), - 'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'), - 'user-data': "%s/%s" % (base_url, 'user-data'), - } - if read_file_or_url is None: read_file_or_url = util.read_file_or_url + if seed_url.endswith("/"): + seed_url = seed_url[:-1] + md = {} - for name in file_order: - url = files.get(name) - if name == 'user-data': - item_retries = 0 + for path, dictname, binary, optional in DS_FIELDS: + if version is None: + url = "%s/%s" % (seed_url, path) else: - item_retries = retries - + url = "%s/%s/%s" % (seed_url, version, path) try: ssl_details = util.fetch_ssl_details(paths) - resp = read_file_or_url(url, retries=item_retries, - timeout=timeout, ssl_details=ssl_details) + resp = read_file_or_url(url, retries=retries, timeout=timeout, + ssl_details=ssl_details) if resp.ok(): - if name in BINARY_FIELDS: - md[name] = resp.contents + if binary: + md[path] = resp.contents else: - md[name] = util.decode_binary(resp.contents) + md[path] = util.decode_binary(resp.contents) else: LOG.warn(("Fetching from %s resulted in" " an invalid http code %s"), url, resp.code) except url_helper.UrlError as e: - if e.code != 404: - raise + if e.code == 404 and not optional: + raise MAASSeedDirMalformed( + "Missing required %s: %s" % (path, e)) + elif e.code != 404: + raise e + return check_seed_contents(md, seed_url) def check_seed_contents(content, seed): - """Validate if content is Is the content a dict that is valid as a - return for a datasource. - Either return a (userdata, metadata) tuple or + """Validate if dictionary content valid as a return for a datasource. 
+ Either return a (userdata, metadata, vendordata) tuple or Raise MAASSeedDirMalformed or MAASSeedDirNone """ - md_required = ('instance-id', 'local-hostname') - if len(content) == 0: + ret = {} + missing = [] + for spath, dpath, _binary, optional in DS_FIELDS: + if spath not in content: + if not optional: + missing.append(spath) + continue + + if "/" in dpath: + top, _, p = dpath.partition("/") + if top not in ret: + ret[top] = {} + ret[top][p] = content[spath] + else: + ret[dpath] = content[spath] + + if len(ret) == 0: raise MAASSeedDirNone("%s: no data files found" % seed) - found = list(content.keys()) - missing = [k for k in md_required if k not in found] - if len(missing): + if missing: raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing)) - userdata = content.get('user-data', b"") - md = {} - for (key, val) in content.items(): - if key == 'user-data': - continue - md[key] = val + vd_data = None + if ret.get('vendor-data'): + err = object() + vd_data = util.load_yaml(ret.get('vendor-data'), default=err, + allowed=(object)) + if vd_data is err: + raise MAASSeedDirMalformed("vendor-data was not loadable as yaml.") - return (userdata, md) + return ret.get('user-data'), ret.get('meta-data'), vd_data class MAASSeedDirNone(Exception): @@ -272,6 +280,7 @@ if __name__ == "__main__": """ import argparse import pprint + import sys parser = argparse.ArgumentParser(description='Interact with MAAS DS') parser.add_argument("--config", metavar="file", @@ -289,17 +298,25 @@ if __name__ == "__main__": default=MD_VERSION) subcmds = parser.add_subparsers(title="subcommands", dest="subcmd") - subcmds.add_parser('crawl', help="crawl the datasource") - subcmds.add_parser('get', help="do a single GET of provided url") - subcmds.add_parser('check-seed', help="read andn verify seed at url") - - parser.add_argument("url", help="the data source to query") + for (name, help) in (('crawl', 'crawl the datasource'), + ('get', 'do a single GET of provided url'), + ('check-seed', 'read and verify seed at url')): + p = subcmds.add_parser(name, help=help) + p.add_argument("url", help="the datasource url", nargs='?', + default=None) args = parser.parse_args() creds = {'consumer_key': args.ckey, 'token_key': args.tkey, 'token_secret': args.tsec, 'consumer_secret': args.csec} + maaspkg_cfg = "/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg" + if (args.config is None and args.url is None and + os.path.exists(maaspkg_cfg) and + os.access(maaspkg_cfg, os.R_OK)): + sys.stderr.write("Used config in %s.\n" % maaspkg_cfg) + args.config = maaspkg_cfg + if args.config: cfg = util.read_conf(args.config) if 'datasource' in cfg: @@ -307,6 +324,12 @@ if __name__ == "__main__": for key in creds.keys(): if key in cfg and creds[key] is None: creds[key] = cfg[key] + if args.url is None and 'metadata_url' in cfg: + args.url = cfg['metadata_url'] + + if args.url is None: + sys.stderr.write("Must provide a url or a config with url.\n") + sys.exit(1) oauth_helper = url_helper.OauthUrlHelper(**creds) @@ -331,16 +354,20 @@ if __name__ == "__main__": printurl(url) if args.subcmd == "check-seed": + sys.stderr.write("Checking seed at %s\n" % args.url) readurl = oauth_helper.readurl if args.url[0] == "/" or args.url.startswith("file://"): - readurl = None - (userdata, metadata) = read_maas_seed_url( - args.url, version=args.apiver, read_file_or_url=readurl, - retries=2) - print("=== userdata ===") - print(userdata.decode()) - print("=== metadata ===") + (userdata, metadata, vd) = read_maas_seed_dir(args.url) + else: + (userdata, metadata, 
vd) = read_maas_seed_url( + args.url, version=args.apiver, read_file_or_url=readurl, + retries=2) + print("=== user-data ===") + print("N/A" if userdata is None else userdata.decode()) + print("=== meta-data ===") pprint.pprint(metadata) + print("=== vendor-data ===") + pprint.pprint("N/A" if vd is None else vd) elif args.subcmd == "get": printurl(args.url) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index cdc9eef5..e6a0b5fe 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -52,7 +52,7 @@ class DataSourceNoCloud(sources.DataSource): found = [] mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "", - 'network-config': {}} + 'network-config': None} try: # Parse the kernel command line, getting data passed in diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index c06d17f3..82558214 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -138,7 +138,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): vd = results.get('vendordata') self.vendordata_pure = vd try: - self.vendordata_raw = openstack.convert_vendordata_json(vd) + self.vendordata_raw = sources.convert_vendordata(vd) except ValueError as e: LOG.warn("Invalid content in vendor-data: %s", e) self.vendordata_raw = None diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index ccc86883..143ab368 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -60,11 +60,15 @@ SMARTOS_ATTRIB_MAP = { 'availability_zone': ('sdc:datacenter_name', True), 'vendor-data': ('sdc:vendor-data', False), 'operator-script': ('sdc:operator-script', False), + 'hostname': ('sdc:hostname', True), + 'dns_domain': ('sdc:dns_domain', True), } SMARTOS_ATTRIB_JSON = { # Cloud-init Key : (SmartOS Key known JSON) 'network-data': 'sdc:nics', + 'dns_servers': 'sdc:resolvers', + 'routes': 'sdc:routes', } SMARTOS_ENV_LX_BRAND = "lx-brand" @@ -311,7 +315,10 @@ class DataSourceSmartOS(sources.DataSource): if self._network_config is None: if self.network_data is not None: self._network_config = ( - convert_smartos_network_data(self.network_data)) + convert_smartos_network_data( + network_data=self.network_data, + dns_servers=self.metadata['dns_servers'], + dns_domain=self.metadata['dns_domain'])) return self._network_config @@ -445,7 +452,8 @@ class JoyentMetadataClient(object): class JoyentMetadataSocketClient(JoyentMetadataClient): - def __init__(self, socketpath): + def __init__(self, socketpath, smartos_type=SMARTOS_ENV_LX_BRAND): + super(JoyentMetadataSocketClient, self).__init__(smartos_type) self.socketpath = socketpath def open_transport(self): @@ -461,7 +469,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient): class JoyentMetadataSerialClient(JoyentMetadataClient): - def __init__(self, device, timeout=10, smartos_type=None): + def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM): super(JoyentMetadataSerialClient, self).__init__(smartos_type) self.device = device self.timeout = timeout @@ -583,7 +591,8 @@ def jmc_client_factory( device=serial_device, timeout=serial_timeout, smartos_type=smartos_type) elif smartos_type == SMARTOS_ENV_LX_BRAND: - return JoyentMetadataSocketClient(socketpath=metadata_sockfile) + return JoyentMetadataSocketClient(socketpath=metadata_sockfile, + smartos_type=smartos_type) raise ValueError("Unknown value for 
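[Editor's note: illustration only, not part of the commit.] A subtle piece of the MAAS rework is how check_seed_contents() now maps the flat remote paths listed in DS_FIELDS into a nested metadata dict. A toy demonstration with invented values:

    content = {
        'meta-data/instance-id': 'i-abcdef',
        'meta-data/local-hostname': 'node01',
        'user-data': b'#cloud-config\n',
    }

    ret = {}
    for dpath, value in content.items():
        if '/' in dpath:
            # 'meta-data/instance-id' -> ret['meta-data']['instance-id']
            top, _, p = dpath.partition('/')
            ret.setdefault(top, {})[p] = value
        else:
            ret[dpath] = value

    # ret == {'meta-data': {'instance-id': 'i-abcdef',
    #                       'local-hostname': 'node01'},
    #         'user-data': b'#cloud-config\n'}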
smartos_type: %s" % smartos_type) @@ -644,14 +653,8 @@ def write_boot_content(content, content_f, link=None, shebang=False, util.logexc(LOG, "failed establishing content link: %s", e) -def get_smartos_environ(uname_version=None, product_name=None, - uname_arch=None): +def get_smartos_environ(uname_version=None, product_name=None): uname = os.uname() - if uname_arch is None: - uname_arch = uname[4] - - if uname_arch.startswith("arm") or uname_arch == "aarch64": - return None # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but # report 'BrandZ virtual linux' as the kernel version @@ -671,8 +674,9 @@ def get_smartos_environ(uname_version=None, product_name=None, return None -# Covert SMARTOS 'sdc:nics' data to network_config yaml -def convert_smartos_network_data(network_data=None): +# Convert SMARTOS 'sdc:nics' data to network_config yaml +def convert_smartos_network_data(network_data=None, + dns_servers=None, dns_domain=None): """Return a dictionary of network_config by parsing provided SMARTOS sdc:nics configuration data @@ -706,9 +710,7 @@ def convert_smartos_network_data(network_data=None): 'broadcast', 'dns_nameservers', 'dns_search', - 'gateway', 'metric', - 'netmask', 'pointopoint', 'routes', 'scope', @@ -716,6 +718,29 @@ def convert_smartos_network_data(network_data=None): ], } + if dns_servers: + if not isinstance(dns_servers, (list, tuple)): + dns_servers = [dns_servers] + else: + dns_servers = [] + + if dns_domain: + if not isinstance(dns_domain, (list, tuple)): + dns_domain = [dns_domain] + else: + dns_domain = [] + + def is_valid_ipv4(addr): + return '.' in addr + + def is_valid_ipv6(addr): + return ':' in addr + + pgws = { + 'ipv4': {'match': is_valid_ipv4, 'gw': None}, + 'ipv6': {'match': is_valid_ipv6, 'gw': None}, + } + config = [] for nic in network_data: cfg = dict((k, v) for k, v in nic.items() @@ -727,18 +752,40 @@ def convert_smartos_network_data(network_data=None): cfg.update({'mac_address': nic['mac']}) subnets = [] - for ip, gw in zip(nic['ips'], nic['gateways']): - subnet = dict((k, v) for k, v in nic.items() - if k in valid_keys['subnet']) - subnet.update({ - 'type': 'static', - 'address': ip, - 'gateway': gw, - }) + for ip in nic.get('ips', []): + if ip == "dhcp": + subnet = {'type': 'dhcp4'} + else: + subnet = dict((k, v) for k, v in nic.items() + if k in valid_keys['subnet']) + subnet.update({ + 'type': 'static', + 'address': ip, + }) + + proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6' + # Only use gateways for 'primary' nics + if 'primary' in nic and nic.get('primary', False): + # the ips and gateways list may be N to M, here + # we map the ip index into the gateways list, + # and handle the case that we could have more ips + # than gateways. 
we only consume the first gateway + if not pgws[proto]['gw']: + gateways = [gw for gw in nic.get('gateways', []) + if pgws[proto]['match'](gw)] + if len(gateways): + pgws[proto]['gw'] = gateways[0] + subnet.update({'gateway': pgws[proto]['gw']}) + subnets.append(subnet) cfg.update({'subnets': subnets}) config.append(cfg) + if dns_servers: + config.append( + {'type': 'nameserver', 'address': dns_servers, + 'search': dns_domain}) + return {'version': 1, 'config': config} @@ -761,21 +808,36 @@ if __name__ == "__main__": sys.exit(1) if len(sys.argv) == 1: keys = (list(SMARTOS_ATTRIB_JSON.keys()) + - list(SMARTOS_ATTRIB_MAP.keys())) + list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config']) else: keys = sys.argv[1:] - data = {} - for key in keys: + def load_key(client, key, data): + if key in data: + return data[key] + if key in SMARTOS_ATTRIB_JSON: keyname = SMARTOS_ATTRIB_JSON[key] - data[key] = jmc.get_json(keyname) + data[key] = client.get_json(keyname) + elif key == "network_config": + for depkey in ('network-data', 'dns_servers', 'dns_domain'): + load_key(client, depkey, data) + data[key] = convert_smartos_network_data( + network_data=data['network-data'], + dns_servers=data['dns_servers'], + dns_domain=data['dns_domain']) else: if key in SMARTOS_ATTRIB_MAP: keyname, strip = SMARTOS_ATTRIB_MAP[key] else: keyname, strip = (key, False) - val = jmc.get(keyname, strip=strip) - data[key] = jmc.get(keyname, strip=strip) + data[key] = client.get(keyname, strip=strip) + + return data[key] + + data = {} + for key in keys: + load_key(client=jmc, key=key, data=data) - print(json.dumps(data, indent=1)) + print(json.dumps(data, indent=1, sort_keys=True, + separators=(',', ': '))) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 87b8e524..d1395270 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -21,8 +21,8 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. import abc +import copy import os - import six from cloudinit import importer @@ -355,6 +355,31 @@ def instance_id_matches_system_uuid(instance_id, field='system-uuid'): return instance_id.lower() == dmi_value.lower() +def convert_vendordata(data, recurse=True): + """data: a loaded object (strings, arrays, dicts). + return something suitable for cloudinit vendordata_raw. 
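[Editor's note: illustration only, not part of the commit.] To see what the extended SmartOS converter emits, here is a hypothetical 'sdc:nics' entry and the approximate shape of the resulting version-1 network config (input and output values invented for illustration):

    # Hypothetical input; output shape follows the converter above.
    nics = [{'mac': '90:b8:d0:ae:64:51', 'interface': 'net0', 'primary': True,
             'ips': ['8.12.42.102/24'], 'gateways': ['8.12.42.1']}]
    # convert_smartos_network_data(network_data=nics,
    #                              dns_servers=['8.8.8.8'],
    #                              dns_domain=['example.com'])
    # would yield roughly:
    # {'version': 1,
    #  'config': [{'type': 'physical', 'name': 'net0',
    #              'mac_address': '90:b8:d0:ae:64:51',
    #              'subnets': [{'type': 'static',
    #                           'address': '8.12.42.102/24',
    #                           'gateway': '8.12.42.1'}]},
    #             {'type': 'nameserver', 'address': ['8.8.8.8'],
    #              'search': ['example.com']}]}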
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 87b8e524..d1395270 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -21,8 +21,8 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import abc
+import copy
 import os
-
 import six
 
 from cloudinit import importer
@@ -355,6 +355,31 @@ def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
     return instance_id.lower() == dmi_value.lower()
 
 
+def convert_vendordata(data, recurse=True):
+    """data: a loaded object (strings, arrays, dicts).
+       return something suitable for cloudinit vendordata_raw.
+
+       if data is:
+          None: return None
+          string: return string
+          list: return data
+                the list is then processed in UserDataProcessor
+          dict: return convert_vendordata(data.get('cloud-init'))
+    """
+    if not data:
+        return None
+    if isinstance(data, six.string_types):
+        return data
+    if isinstance(data, list):
+        return copy.deepcopy(data)
+    if isinstance(data, dict):
+        if recurse is True:
+            return convert_vendordata(data.get('cloud-init'),
+                                      recurse=False)
+        raise ValueError("vendordata['cloud-init'] cannot be dict")
+    raise ValueError("Unknown data type for vendordata: %s" % type(data))
+
+
 # 'depends' is a list of dependencies (DEP_FILESYSTEM)
 # ds_list is a list of 2 item lists
 # ds_list = [
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 2e7a1d47..84322e0e 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -571,7 +571,7 @@ def convert_net_json(network_json=None, known_macs=None):
                 subnet['ipv6'] = True
             subnets.append(subnet)
         cfg.update({'subnets': subnets})
-        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge']:
+        if link['type'] in ['ethernet', 'vif', 'ovs', 'phy', 'bridge', 'tap']:
             cfg.update({
                 'type': 'physical',
                 'mac_address': link['ethernet_mac_address']})
@@ -621,28 +621,3 @@ def convert_net_json(network_json=None, known_macs=None):
             config.append(cfg)
 
     return {'version': 1, 'config': config}
-
-
-def convert_vendordata_json(data, recurse=True):
-    """data: a loaded json *object* (strings, arrays, dicts).
-       return something suitable for cloudinit vendordata_raw.
-
-       if data is:
-          None: return None
-          string: return string
-          list: return data
-                the list is then processed in UserDataProcessor
-          dict: return convert_vendordata_json(data.get('cloud-init'))
-    """
-    if not data:
-        return None
-    if isinstance(data, six.string_types):
-        return data
-    if isinstance(data, list):
-        return copy.deepcopy(data)
-    if isinstance(data, dict):
-        if recurse is True:
-            return convert_vendordata_json(data.get('cloud-init'),
-                                           recurse=False)
-        raise ValueError("vendordata['cloud-init'] cannot be dict")
-    raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/util.py b/cloudinit/util.py
index e5dd61a0..226628cc 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2227,10 +2227,17 @@ def read_dmi_data(key):
 
     If all of the above fail to find a value, None will be returned.
     """
+
     syspath_value = _read_dmi_syspath(key)
     if syspath_value is not None:
         return syspath_value
 
+    # running dmidecode can be problematic on some arches (LP: #1243287)
+    uname_arch = os.uname()[4]
+    if uname_arch.startswith("arm") or uname_arch == "aarch64":
+        LOG.debug("dmidata is not supported on %s", uname_arch)
+        return None
+
     dmidecode_path = which('dmidecode')
     if dmidecode_path:
         return _call_dmidecode(key, dmidecode_path)
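[Editor's note: illustration only, not part of the commit.] For reference, the relocated convert_vendordata() handles each input shape as follows; the behavior is read directly off the function body above, and the calls are illustrative:

    from cloudinit import sources

    sources.convert_vendordata(None)                    # -> None
    sources.convert_vendordata("#!/bin/sh\necho hi")    # -> the string itself
    sources.convert_vendordata(["part-1", "part-2"])    # -> a copy of the list
    sources.convert_vendordata({'cloud-init': 'data'})  # -> 'data' (one level)
    # A dict nested under the 'cloud-init' key raises ValueError.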