author     Chad Smith <chad.smith@canonical.com>  2017-10-06 13:22:54 -0600
committer  Chad Smith <chad.smith@canonical.com>  2017-10-06 13:22:54 -0600
commit     9fd022780ae516df3499b17b2d69b72fc502917c (patch)
tree       bc33ac6296f374414ccb15dce233a4293b8633d3 /cloudinit/config
parent     89630a6658c099d59f2766493a35c2ad266a8f42 (diff)
parent     45d361cb0b7f5e4e7d79522bd285871898358623 (diff)
download   vyos-cloud-init-9fd022780ae516df3499b17b2d69b72fc502917c.tar.gz
           vyos-cloud-init-9fd022780ae516df3499b17b2d69b72fc502917c.zip
merge from master at 17.1-17-g45d361cb
Diffstat (limited to 'cloudinit/config')
-rw-r--r--  cloudinit/config/cc_bootcmd.py                    90
-rw-r--r--  cloudinit/config/cc_chef.py                       44
-rw-r--r--  cloudinit/config/cc_landscape.py                   4
-rw-r--r--  cloudinit/config/cc_ntp.py                       106
-rw-r--r--  cloudinit/config/cc_puppet.py                     33
-rw-r--r--  cloudinit/config/cc_resizefs.py                  157
-rw-r--r--  cloudinit/config/cc_resolv_conf.py                 2
-rw-r--r--  cloudinit/config/cc_runcmd.py                     84
-rw-r--r--  cloudinit/config/cc_snappy.py                      4
-rwxr-xr-x  cloudinit/config/cc_ssh_authkey_fingerprints.py    4
-rw-r--r--  cloudinit/config/cc_ubuntu_init_switch.py        160
-rw-r--r--  cloudinit/config/cc_zypper_add_repo.py           218
-rw-r--r--  cloudinit/config/schema.py                       224
13 files changed, 718 insertions(+), 412 deletions(-)
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 604f93b0..233da1ef 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -3,44 +3,73 @@
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Chad Smith <chad.smith@canonical.com>
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Bootcmd
--------
-**Summary:** run commands early in boot process
-
-This module runs arbitrary commands very early in the boot process,
-only slightly after a boothook would run. This is very similar to a
-boothook, but more user friendly. The environment variable ``INSTANCE_ID``
-will be set to the current instance id for all run commands. Commands can be
-specified either as lists or strings. For invocation details, see ``runcmd``.
-
-.. note::
- bootcmd should only be used for things that could not be done later in the
- boot process.
-
-**Internal name:** ``cc_bootcmd``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
- - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
-"""
+"""Bootcmd: run arbitrary commands early in the boot process."""
import os
+from textwrap import dedent
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
+from cloudinit import temp_utils
from cloudinit import util
frequency = PER_ALWAYS
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
+
+distros = ['all']
+
+schema = {
+ 'id': 'cc_bootcmd',
+ 'name': 'Bootcmd',
+ 'title': 'Run arbitrary commands early in the boot process',
+ 'description': dedent("""\
+ This module runs arbitrary commands very early in the boot process,
+ only slightly after a boothook would run. This is very similar to a
+ boothook, but more user friendly. The environment variable
+ ``INSTANCE_ID`` will be set to the current instance id for all run
+ commands. Commands can be specified either as lists or strings. For
+ invocation details, see ``runcmd``.
+
+ .. note::
+ bootcmd should only be used for things that could not be done later
+ in the boot process."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ bootcmd:
+ - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
+ - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
+ """)],
+ 'frequency': PER_ALWAYS,
+ 'type': 'object',
+ 'properties': {
+ 'bootcmd': {
+ 'type': 'array',
+ 'items': {
+ 'oneOf': [
+ {'type': 'array', 'items': {'type': 'string'}},
+ {'type': 'string'}]
+ },
+ 'additionalItems': False, # Reject items of non-string non-list
+ 'additionalProperties': False,
+ 'minItems': 1,
+ 'required': [],
+ 'uniqueItems': True
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
def handle(name, cfg, cloud, log, _args):
@@ -49,13 +78,14 @@ def handle(name, cfg, cloud, log, _args):
" no 'bootcmd' key in configuration"), name)
return
- with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:
+ validate_cloudconfig_schema(cfg, schema)
+ with temp_utils.ExtendedTemporaryFile(suffix=".sh") as tmpf:
try:
content = util.shellify(cfg["bootcmd"])
tmpf.write(util.encode_text(content))
tmpf.flush()
- except Exception:
- util.logexc(log, "Failed to shellify bootcmd")
+ except Exception as e:
+ util.logexc(log, "Failed to shellify bootcmd: %s", str(e))
raise
try:
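For reference, a minimal sketch (not part of the commit) of exercising the new bootcmd schema directly; the sample config dict below is hypothetical, while ``schema`` and ``validate_cloudconfig_schema`` are the objects introduced above:

    from cloudinit.config.cc_bootcmd import schema
    from cloudinit.config.schema import validate_cloudconfig_schema

    # Hypothetical, already-parsed cloud-config mirroring the schema examples.
    sample_cfg = {
        'bootcmd': [
            'echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts',
            ['cloud-init-per', 'once', 'mymkfs', 'mkfs', '/dev/vdb'],
        ]
    }
    # strict=True raises SchemaValidationError; the default only logs warnings.
    validate_cloudconfig_schema(sample_cfg, schema, strict=True)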
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 02c70b10..46abedd1 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -58,6 +58,9 @@ file).
log_level:
log_location:
node_name:
+ omnibus_url:
+ omnibus_url_retries:
+ omnibus_version:
pid_file:
server_url:
show_time:
@@ -279,6 +282,31 @@ def run_chef(chef_cfg, log):
util.subp(cmd, capture=False)
+def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None):
+ """Install an omnibus unified package from url.
+
+ @param url: URL where blob of chef content may be downloaded. Defaults to
+ OMNIBUS_URL.
+ @param retries: Number of retries to perform when attempting to read url.
+ Defaults to OMNIBUS_URL_RETRIES
+ @param omnibus_version: Optional version string to require for omnibus
+ install.
+ """
+ if url is None:
+ url = OMNIBUS_URL
+ if retries is None:
+ retries = OMNIBUS_URL_RETRIES
+
+ if omnibus_version is None:
+ args = []
+ else:
+ args = ['-v', omnibus_version]
+ content = url_helper.readurl(url=url, retries=retries).contents
+ return util.subp_blob_in_tempfile(
+ blob=content, args=args,
+ basename='chef-omnibus-install', capture=False)
+
+
def install_chef(cloud, chef_cfg, log):
# If chef is not installed, we install chef based on 'install_type'
install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
@@ -297,17 +325,11 @@ def install_chef(cloud, chef_cfg, log):
# This will install and run the chef-client from packages
cloud.distro.install_packages(('chef',))
elif install_type == 'omnibus':
- # This will install as a omnibus unified package
- url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL)
- retries = max(0, util.get_cfg_option_int(chef_cfg,
- "omnibus_url_retries",
- default=OMNIBUS_URL_RETRIES))
- content = url_helper.readurl(url=url, retries=retries).contents
- with util.tempdir() as tmpd:
- # Use tmpdir over tmpfile to avoid 'text file busy' on execute
- tmpf = "%s/chef-omnibus-install" % tmpd
- util.write_file(tmpf, content, mode=0o700)
- util.subp([tmpf], capture=False)
+ omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version")
+ install_chef_from_omnibus(
+ url=util.get_cfg_option_str(chef_cfg, "omnibus_url"),
+ retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"),
+ omnibus_version=omnibus_version)
else:
log.warn("Unknown chef install type '%s'", install_type)
run = False
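As a rough usage sketch, the new ``install_chef_from_omnibus`` helper can also be invoked directly; the URL, retry count and version below are illustrative values, not defaults defined by this module:

    from cloudinit.config.cc_chef import install_chef_from_omnibus

    install_chef_from_omnibus(
        url='https://www.chef.io/chef/install.sh',   # illustrative URL
        retries=5,
        omnibus_version='12.19.36')                  # optional version pin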
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 86b71383..8f9f1abd 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -57,7 +57,7 @@ The following default client config is provided, but can be overridden::
import os
-from six import StringIO
+from six import BytesIO
from configobj import ConfigObj
@@ -109,7 +109,7 @@ def handle(_name, cfg, cloud, log, _args):
ls_cloudcfg,
]
merged = merge_together(merge_data)
- contents = StringIO()
+ contents = BytesIO()
merged.write(contents)
util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE))
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index 31ed64e3..15ae1ecd 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -4,39 +4,10 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-NTP
----
-**Summary:** enable and configure ntp
-
-Handle ntp configuration. If ntp is not installed on the system and ntp
-configuration is specified, ntp will be installed. If there is a default ntp
-config file in the image or one is present in the distro's ntp package, it will
-be copied to ``/etc/ntp.conf.dist`` before any changes are made. A list of ntp
-pools and ntp servers can be provided under the ``ntp`` config key. If no ntp
-servers or pools are provided, 4 pools will be used in the format
-``{0-3}.{distro}.pool.ntp.org``.
-
-**Internal name:** ``cc_ntp``
-
-**Module frequency:** per instance
-
-**Supported distros:** centos, debian, fedora, opensuse, ubuntu
-
-**Config keys**::
-
- ntp:
- pools:
- - 0.company.pool.ntp.org
- - 1.company.pool.ntp.org
- - ntp.myorg.org
- servers:
- - my.ntp.server.local
- - ntp.ubuntu.com
- - 192.168.23.2
-"""
+"""NTP: enable and configure ntp"""
-from cloudinit.config.schema import validate_cloudconfig_schema
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
@@ -50,6 +21,7 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
+TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
NR_POOL_SERVERS = 4
distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
@@ -75,10 +47,13 @@ schema = {
``{0-3}.{distro}.pool.ntp.org``."""),
'distros': distros,
'examples': [
- {'ntp': {'pools': ['0.company.pool.ntp.org', '1.company.pool.ntp.org',
- 'ntp.myorg.org'],
- 'servers': ['my.ntp.server.local', 'ntp.ubuntu.com',
- '192.168.23.2']}}],
+ dedent("""\
+ ntp:
+ pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org]
+ servers:
+ - ntp.server.local
+ - ntp.ubuntu.com
+ - 192.168.23.2""")],
'frequency': PER_INSTANCE,
'type': 'object',
'properties': {
@@ -116,6 +91,8 @@ schema = {
}
}
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
def handle(name, cfg, cloud, log, _args):
"""Enable and configure ntp."""
@@ -132,20 +109,50 @@ def handle(name, cfg, cloud, log, _args):
" is a %s %instead"), type_utils.obj_name(ntp_cfg))
validate_cloudconfig_schema(cfg, schema)
+ if ntp_installable():
+ service_name = 'ntp'
+ confpath = NTP_CONF
+ template_name = None
+ packages = ['ntp']
+ check_exe = 'ntpd'
+ else:
+ service_name = 'systemd-timesyncd'
+ confpath = TIMESYNCD_CONF
+ template_name = 'timesyncd.conf'
+ packages = []
+ check_exe = '/lib/systemd/systemd-timesyncd'
+
rename_ntp_conf()
# ensure when ntp is installed it has a configuration file
# to use instead of starting up with packaged defaults
- write_ntp_config_template(ntp_cfg, cloud)
- install_ntp(cloud.distro.install_packages, packages=['ntp'],
- check_exe="ntpd")
- # if ntp was already installed, it may not have started
+ write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name)
+ install_ntp(cloud.distro.install_packages, packages=packages,
+ check_exe=check_exe)
+
try:
- reload_ntp(systemd=cloud.distro.uses_systemd())
+ reload_ntp(service_name, systemd=cloud.distro.uses_systemd())
except util.ProcessExecutionError as e:
LOG.exception("Failed to reload/start ntp service: %s", e)
raise
+def ntp_installable():
+ """Check if we can install ntp package
+
+ Ubuntu-Core systems do not have an ntp package available, so
+ we always return False. Other systems require package managers to install
+    the ntp package. If we fail to find one of the package managers, then we
+ cannot install ntp.
+ """
+ if util.system_is_snappy():
+ return False
+
+ if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])):
+ return True
+
+ return False
+
+
def install_ntp(install_func, packages=None, check_exe="ntpd"):
if util.which(check_exe):
return
@@ -156,7 +163,7 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"):
def rename_ntp_conf(config=None):
- """Rename any existing ntp.conf file and render from template"""
+ """Rename any existing ntp.conf file"""
if config is None: # For testing
config = NTP_CONF
if os.path.exists(config):
@@ -171,7 +178,7 @@ def generate_server_names(distro):
return names
-def write_ntp_config_template(cfg, cloud):
+def write_ntp_config_template(cfg, cloud, path, template=None):
servers = cfg.get('servers', [])
pools = cfg.get('pools', [])
@@ -185,19 +192,20 @@ def write_ntp_config_template(cfg, cloud):
'pools': pools,
}
- template_fn = cloud.get_template_filename('ntp.conf.%s' %
- (cloud.distro.name))
+ if template is None:
+ template = 'ntp.conf.%s' % cloud.distro.name
+
+ template_fn = cloud.get_template_filename(template)
if not template_fn:
template_fn = cloud.get_template_filename('ntp.conf')
if not template_fn:
raise RuntimeError(("No template found, "
- "not rendering %s"), NTP_CONF)
+ "not rendering %s"), path)
- templater.render_to_file(template_fn, NTP_CONF, params)
+ templater.render_to_file(template_fn, path, params)
-def reload_ntp(systemd=False):
- service = 'ntp'
+def reload_ntp(service, systemd=False):
if systemd:
cmd = ['systemctl', 'reload-or-restart', service]
else:
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index dc11561b..28b1d568 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -15,21 +15,23 @@ This module handles puppet installation and configuration. If the ``puppet``
key does not exist in global configuration, no action will be taken. If a
config entry for ``puppet`` is present, then by default the latest version of
puppet will be installed. If ``install`` is set to ``false``, puppet will not
-be installed. However, this may result in an error if puppet is not already
+be installed. However, this will result in an error if puppet is not already
present on the system. The version of puppet to be installed can be specified
under ``version``, and defaults to ``none``, which selects the latest version
in the repos. If the ``puppet`` config key exists in the config archive, this
module will attempt to start puppet even if no installation was performed.
-Puppet configuration can be specified under the ``conf`` key. The configuration
-is specified as a dictionary which is converted into ``<key>=<value>`` format
-and appended to ``puppet.conf`` under the ``[puppetd]`` section. The
+Puppet configuration can be specified under the ``conf`` key. The
+configuration is specified as a dictionary containing high-level ``<section>``
+keys and lists of ``<key>=<value>`` pairs within each section. Each section
+name and ``<key>=<value>`` pair is written directly to ``puppet.conf``. As
+such, section names should be one of: ``main``, ``master``, ``agent`` or
+``user`` and keys should be valid puppet configuration options. The
``certname`` key supports string substitutions for ``%i`` and ``%f``,
corresponding to the instance id and fqdn of the machine respectively.
-If ``ca_cert`` is present under ``conf``, it will not be written to
-``puppet.conf``, but instead will be used as the puppermaster certificate.
-It should be specified in pem format as a multi-line string (using the ``|``
-yaml notation).
+If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but
+instead will be used as the puppetmaster certificate. It should be specified
+in pem format as a multi-line string (using the ``|`` yaml notation).
**Internal name:** ``cc_puppet``
@@ -43,12 +45,13 @@ yaml notation).
install: <true/false>
version: <version>
conf:
- server: "puppetmaster.example.org"
- certname: "%i.%f"
- ca_cert: |
- -------BEGIN CERTIFICATE-------
- <cert data>
- -------END CERTIFICATE-------
+ agent:
+ server: "puppetmaster.example.org"
+ certname: "%i.%f"
+ ca_cert: |
+ -------BEGIN CERTIFICATE-------
+ <cert data>
+ -------END CERTIFICATE-------
"""
from six import StringIO
@@ -127,7 +130,7 @@ def handle(name, cfg, cloud, log, _args):
util.write_file(PUPPET_SSL_CERT_PATH, cfg)
util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
else:
- # Iterate throug the config items, we'll use ConfigParser.set
+ # Iterate through the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
for (o, v) in cfg.items():
if o == 'certname':
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index ceee952b..f774baa3 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -6,31 +6,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Resizefs
---------
-**Summary:** resize filesystem
+"""Resizefs: cloud-config module which resizes the filesystem"""
-Resize a filesystem to use all avaliable space on partition. This module is
-useful along with ``cc_growpart`` and will ensure that if the root partition
-has been resized the root filesystem will be resized along with it. By default,
-``cc_resizefs`` will resize the root partition and will block the boot process
-while the resize command is running. Optionally, the resize operation can be
-performed in the background while cloud-init continues running modules. This
-can be enabled by setting ``resize_rootfs`` to ``true``. This module can be
-disabled altogether by setting ``resize_rootfs`` to ``false``.
-
-**Internal name:** ``cc_resizefs``
-
-**Module frequency:** per always
-
-**Supported distros:** all
-
-**Config keys**::
-
- resize_rootfs: <true/false/"noblock">
- resize_rootfs_tmp: <directory>
-"""
import errno
import getopt
@@ -38,11 +15,47 @@ import os
import re
import shlex
import stat
+from textwrap import dedent
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
from cloudinit.settings import PER_ALWAYS
from cloudinit import util
+NOBLOCK = "noblock"
+
frequency = PER_ALWAYS
+distros = ['all']
+
+schema = {
+ 'id': 'cc_resizefs',
+ 'name': 'Resizefs',
+ 'title': 'Resize filesystem',
+ 'description': dedent("""\
+        Resize a filesystem to use all available space on the partition. This
+ module is useful along with ``cc_growpart`` and will ensure that if the
+ root partition has been resized the root filesystem will be resized
+ along with it. By default, ``cc_resizefs`` will resize the root
+ partition and will block the boot process while the resize command is
+ running. Optionally, the resize operation can be performed in the
+ background while cloud-init continues running modules. This can be
+ enabled by setting ``resize_rootfs`` to ``true``. This module can be
+ disabled altogether by setting ``resize_rootfs`` to ``false``."""),
+ 'distros': distros,
+ 'examples': [
+ 'resize_rootfs: false # disable root filesystem resize operation'],
+ 'frequency': PER_ALWAYS,
+ 'type': 'object',
+ 'properties': {
+ 'resize_rootfs': {
+ 'enum': [True, False, NOBLOCK],
+ 'description': dedent("""\
+ Whether to resize the root partition. Default: 'true'""")
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
def _resize_btrfs(mount_point, devpth):
@@ -54,7 +67,7 @@ def _resize_ext(mount_point, devpth):
def _resize_xfs(mount_point, devpth):
- return ('xfs_growfs', devpth)
+ return ('xfs_growfs', mount_point)
def _resize_ufs(mount_point, devpth):
@@ -131,8 +144,6 @@ RESIZE_FS_PRECHECK_CMDS = {
'ufs': _can_skip_resize_ufs
}
-NOBLOCK = "noblock"
-
def rootdev_from_cmdline(cmdline):
found = None
@@ -161,71 +172,77 @@ def can_skip_resize(fs_type, resize_what, devpth):
return False
-def handle(name, cfg, _cloud, log, args):
- if len(args) != 0:
- resize_root = args[0]
- else:
- resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
+def is_device_path_writable_block(devpath, info, log):
+ """Return True if devpath is a writable block device.
- if not util.translate_bool(resize_root, addons=[NOBLOCK]):
- log.debug("Skipping module named %s, resizing disabled", name)
- return
-
- # TODO(harlowja) is the directory ok to be used??
- resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
- util.ensure_dir(resize_root_d)
-
- # TODO(harlowja): allow what is to be resized to be configurable??
- resize_what = "/"
- result = util.get_mount_info(resize_what, log)
- if not result:
- log.warn("Could not determine filesystem type of %s", resize_what)
- return
-
- (devpth, fs_type, mount_point) = result
-
- info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
- log.debug("resize_info: %s" % info)
+ @param devpath: Path to the root device we want to resize.
+ @param info: String representing information about the requested device.
+ @param log: Logger to which logs will be added upon error.
+ @returns Boolean True if block device is writable
+ """
container = util.is_container()
# Ensure the path is a block device.
- if (devpth == "/dev/root" and not os.path.exists(devpth) and
+ if (devpath == "/dev/root" and not os.path.exists(devpath) and
not container):
- devpth = util.rootdev_from_cmdline(util.get_cmdline())
- if devpth is None:
+ devpath = util.rootdev_from_cmdline(util.get_cmdline())
+ if devpath is None:
log.warn("Unable to find device '/dev/root'")
- return
- log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth)
+ return False
+ log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath)
+
+ if devpath == 'overlayroot':
+ log.debug("Not attempting to resize devpath '%s': %s", devpath, info)
+ return False
try:
- statret = os.stat(devpth)
+ statret = os.stat(devpath)
except OSError as exc:
if container and exc.errno == errno.ENOENT:
log.debug("Device '%s' did not exist in container. "
- "cannot resize: %s", devpth, info)
+ "cannot resize: %s", devpath, info)
elif exc.errno == errno.ENOENT:
log.warn("Device '%s' did not exist. cannot resize: %s",
- devpth, info)
+ devpath, info)
else:
raise exc
- return
-
- if not os.access(devpth, os.W_OK):
- if container:
- log.debug("'%s' not writable in container. cannot resize: %s",
- devpth, info)
- else:
- log.warn("'%s' not writable. cannot resize: %s", devpth, info)
- return
+ return False
if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode):
if container:
log.debug("device '%s' not a block device in container."
- " cannot resize: %s" % (devpth, info))
+ " cannot resize: %s" % (devpath, info))
else:
log.warn("device '%s' not a block device. cannot resize: %s" %
- (devpth, info))
+ (devpath, info))
+ return False
+ return True
+
+
+def handle(name, cfg, _cloud, log, args):
+ if len(args) != 0:
+ resize_root = args[0]
+ else:
+ resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
+ validate_cloudconfig_schema(cfg, schema)
+ if not util.translate_bool(resize_root, addons=[NOBLOCK]):
+ log.debug("Skipping module named %s, resizing disabled", name)
+ return
+
+ # TODO(harlowja): allow what is to be resized to be configurable??
+ resize_what = "/"
+ result = util.get_mount_info(resize_what, log)
+ if not result:
+ log.warn("Could not determine filesystem type of %s", resize_what)
+ return
+
+ (devpth, fs_type, mount_point) = result
+
+ info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what)
+ log.debug("resize_info: %s" % info)
+
+ if not is_device_path_writable_block(devpth, info, log):
return
resizer = None
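A small sketch of how the new ``resize_rootfs`` enum behaves; the config dicts below are hypothetical, and without ``strict=True`` an invalid value only produces a logged warning:

    from cloudinit.config.cc_resizefs import NOBLOCK, schema
    from cloudinit.config.schema import validate_cloudconfig_schema

    validate_cloudconfig_schema({'resize_rootfs': NOBLOCK}, schema)   # accepted
    validate_cloudconfig_schema({'resize_rootfs': 'nblock'}, schema)  # warns: not an allowed value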
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 2548d1f1..9812562a 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -55,7 +55,7 @@ LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
-distros = ['fedora', 'rhel', 'sles']
+distros = ['fedora', 'opensuse', 'rhel', 'sles']
def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index dfa8cb3d..449872f0 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -6,41 +6,70 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-"""
-Runcmd
-------
-**Summary:** run commands
+"""Runcmd: run arbitrary commands at rc.local with output to the console"""
-Run arbitrary commands at a rc.local like level with output to the console.
-Each item can be either a list or a string. If the item is a list, it will be
-properly executed as if passed to ``execve()`` (with the first arg as the
-command). If the item is a string, it will be written to a file and interpreted
-using ``sh``.
-
-.. note::
- all commands must be proper yaml, so you have to quote any characters yaml
- would eat (':' can be problematic)
-
-**Internal name:** ``cc_runcmd``
+from cloudinit.config.schema import (
+ get_schema_doc, validate_cloudconfig_schema)
+from cloudinit.distros import ALL_DISTROS
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
-**Module frequency:** per instance
+import os
+from textwrap import dedent
-**Supported distros:** all
-**Config keys**::
+# The schema definition for each cloud-config module is a strict contract for
+# describing supported configuration parameters for each cloud-config section.
+# It allows cloud-config to validate and alert users to invalid or ignored
+# configuration options before actually attempting to deploy with said
+# configuration.
- runcmd:
- - [ ls, -l, / ]
- - [ sh, -xc, "echo $(date) ': hello world!'" ]
- - [ sh, -c, echo "=========hello world'=========" ]
- - ls -l /root
- - [ wget, "http://example.org", -O, /tmp/index.html ]
-"""
+distros = [ALL_DISTROS]
+schema = {
+ 'id': 'cc_runcmd',
+ 'name': 'Runcmd',
+ 'title': 'Run arbitrary commands',
+ 'description': dedent("""\
+ Run arbitrary commands at a rc.local like level with output to the
+ console. Each item can be either a list or a string. If the item is a
+ list, it will be properly executed as if passed to ``execve()`` (with
+ the first arg as the command). If the item is a string, it will be
+ written to a file and interpreted
+ using ``sh``.
-import os
+ .. note::
+ all commands must be proper yaml, so you have to quote any characters
+ yaml would eat (':' can be problematic)"""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ runcmd:
+ - [ ls, -l, / ]
+ - [ sh, -xc, "echo $(date) ': hello world!'" ]
+ - [ sh, -c, echo "=========hello world'=========" ]
+ - ls -l /root
+ - [ wget, "http://example.org", -O, /tmp/index.html ]
+ """)],
+ 'frequency': PER_INSTANCE,
+ 'type': 'object',
+ 'properties': {
+ 'runcmd': {
+ 'type': 'array',
+ 'items': {
+ 'oneOf': [
+ {'type': 'array', 'items': {'type': 'string'}},
+ {'type': 'string'}]
+ },
+ 'additionalItems': False, # Reject items of non-string non-list
+ 'additionalProperties': False,
+ 'minItems': 1,
+ 'required': [],
+ 'uniqueItems': True
+ }
+ }
+}
-from cloudinit import util
+__doc__ = get_schema_doc(schema) # Supplement python help()
def handle(name, cfg, cloud, log, _args):
@@ -49,6 +78,7 @@ def handle(name, cfg, cloud, log, _args):
" no 'runcmd' key in configuration"), name)
return
+ validate_cloudconfig_schema(cfg, schema)
out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
cmd = cfg["runcmd"]
try:
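For context, a sketch of how mixed list- and string-form commands are turned into a shell script by ``util.shellify`` (the same helper the bootcmd hunk above passes its commands through); the command values are hypothetical:

    from cloudinit import util

    cmds = [['ls', '-l', '/'], 'ls -l /root']
    print(util.shellify(cmds))
    # The list item is rendered as a quoted command line (execve-style) and the
    # string item is emitted verbatim; the result is an sh script that the
    # runcmd scripts handler executes later in boot.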
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index a9682f19..eecb8178 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -63,11 +63,11 @@ is ``auto``. Options are:
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import temp_utils
from cloudinit import util
import glob
import os
-import tempfile
LOG = logging.getLogger(__name__)
@@ -183,7 +183,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
# config
# Note, however, we do not touch config files on disk.
nested_cfg = {'config': {shortname: config}}
- (fd, cfg_tmpf) = tempfile.mkstemp()
+ (fd, cfg_tmpf) = temp_utils.mkstemp()
os.write(fd, util.yaml_dumps(nested_cfg).encode())
os.close(fd)
cfgfile = cfg_tmpf
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 0066e97f..35d8c57f 100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -28,7 +28,7 @@ the keys can be specified, but defaults to ``md5``.
import base64
import hashlib
-from prettytable import PrettyTable
+from cloudinit.simpletable import SimpleTable
from cloudinit.distros import ug_util
from cloudinit import ssh_util
@@ -74,7 +74,7 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
return
tbl_fields = ['Keytype', 'Fingerprint (%s)' % (hash_meth), 'Options',
'Comment']
- tbl = PrettyTable(tbl_fields)
+ tbl = SimpleTable(tbl_fields)
for entry in key_entries:
if _is_printable_key(entry):
row = []
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
deleted file mode 100644
index 5dd26901..00000000
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (C) 2014 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-"""
-Ubuntu Init Switch
-------------------
-**Summary:** reboot system into another init.
-
-This module provides a way for the user to boot with systemd even if the image
-is set to boot with upstart. It should be run as one of the first
-``cloud_init_modules``, and will switch the init system and then issue a
-reboot. The next boot will come up in the target init system and no action
-will be taken. This should be inert on non-ubuntu systems, and also
-exit quickly.
-
-.. note::
- best effort is made, but it's possible this system will break, and probably
- won't interact well with any other mechanism you've used to switch the init
- system.
-
-**Internal name:** ``cc_ubuntu_init_switch``
-
-**Module frequency:** once per instance
-
-**Supported distros:** ubuntu
-
-**Config keys**::
-
- init_switch:
- target: systemd (can be 'systemd' or 'upstart')
- reboot: true (reboot if a change was made, or false to not reboot)
-"""
-
-from cloudinit.distros import ubuntu
-from cloudinit import log as logging
-from cloudinit.settings import PER_INSTANCE
-from cloudinit import util
-
-import os
-import time
-
-frequency = PER_INSTANCE
-REBOOT_CMD = ["/sbin/reboot", "--force"]
-
-DEFAULT_CONFIG = {
- 'init_switch': {'target': None, 'reboot': True}
-}
-
-SWITCH_INIT = """
-#!/bin/sh
-# switch_init: [upstart | systemd]
-
-is_systemd() {
- [ "$(dpkg-divert --listpackage /sbin/init)" = "systemd-sysv" ]
-}
-debug() { echo "$@" 1>&2; }
-fail() { echo "$@" 1>&2; exit 1; }
-
-if [ "$1" = "systemd" ]; then
- if is_systemd; then
- debug "already systemd, nothing to do"
- else
- [ -f /lib/systemd/systemd ] || fail "no systemd available";
- dpkg-divert --package systemd-sysv --divert /sbin/init.diverted \\
- --rename /sbin/init
- fi
- [ -f /sbin/init ] || ln /lib/systemd/systemd /sbin/init
-elif [ "$1" = "upstart" ]; then
- if is_systemd; then
- rm -f /sbin/init
- dpkg-divert --package systemd-sysv --rename --remove /sbin/init
- else
- debug "already upstart, nothing to do."
- fi
-else
- fail "Error. expect 'upstart' or 'systemd'"
-fi
-"""
-
-distros = ['ubuntu']
-
-
-def handle(name, cfg, cloud, log, args):
- """Handler method activated by cloud-init."""
-
- if not isinstance(cloud.distro, ubuntu.Distro):
- log.debug("%s: distro is '%s', not ubuntu. returning",
- name, cloud.distro.__class__)
- return
-
- cfg = util.mergemanydict([cfg, DEFAULT_CONFIG])
- target = cfg['init_switch']['target']
- reboot = cfg['init_switch']['reboot']
-
- if len(args) != 0:
- target = args[0]
- if len(args) > 1:
- reboot = util.is_true(args[1])
-
- if not target:
- log.debug("%s: target=%s. nothing to do", name, target)
- return
-
- if not util.which('dpkg'):
- log.warn("%s: 'dpkg' not available. Assuming not ubuntu", name)
- return
-
- supported = ('upstart', 'systemd')
- if target not in supported:
- log.warn("%s: target set to %s, expected one of: %s",
- name, target, str(supported))
-
- if os.path.exists("/run/systemd/system"):
- current = "systemd"
- else:
- current = "upstart"
-
- if current == target:
- log.debug("%s: current = target = %s. nothing to do", name, target)
- return
-
- try:
- util.subp(['sh', '-s', target], data=SWITCH_INIT)
- except util.ProcessExecutionError as e:
- log.warn("%s: Failed to switch to init '%s'. %s", name, target, e)
- return
-
- if util.is_false(reboot):
- log.info("%s: switched '%s' to '%s'. reboot=false, not rebooting.",
- name, current, target)
- return
-
- try:
- log.warn("%s: switched '%s' to '%s'. rebooting.",
- name, current, target)
- logging.flushLoggers(log)
- _fire_reboot(log, wait_attempts=4, initial_sleep=4)
- except Exception as e:
- util.logexc(log, "Requested reboot did not happen!")
- raise
-
-
-def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2):
- util.subp(REBOOT_CMD)
- start = time.time()
- wait_time = initial_sleep
- for _i in range(0, wait_attempts):
- time.sleep(wait_time)
- wait_time *= backoff
- elapsed = time.time() - start
- log.debug("Rebooted, but still running after %s seconds", int(elapsed))
- # If we got here, not good
- elapsed = time.time() - start
- raise RuntimeError(("Reboot did not happen"
- " after %s seconds!") % (int(elapsed)))
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py
new file mode 100644
index 00000000..aba26952
--- /dev/null
+++ b/cloudinit/config/cc_zypper_add_repo.py
@@ -0,0 +1,218 @@
+#
+# Copyright (C) 2017 SUSE LLC.
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""zypper_add_repo: Add zyper repositories to the system"""
+
+import configobj
+import os
+from six import string_types
+from textwrap import dedent
+
+from cloudinit.config.schema import get_schema_doc
+from cloudinit import log as logging
+from cloudinit.settings import PER_ALWAYS
+from cloudinit import util
+
+distros = ['opensuse', 'sles']
+
+schema = {
+ 'id': 'cc_zypper_add_repo',
+ 'name': 'ZypperAddRepo',
+ 'title': 'Configure zypper behavior and add zypper repositories',
+ 'description': dedent("""\
+ Configure zypper behavior by modifying /etc/zypp/zypp.conf. The
+ configuration writer is "dumb" and will simply append the provided
+        configuration options to the configuration file. Duplicate option
+        settings are resolved by the way the zypp.conf file is parsed. The
+        file is in INI format.
+ Add repositories to the system. No validation is performed on the
+ repository file entries, it is assumed the user is familiar with
+ the zypper repository file format."""),
+ 'distros': distros,
+ 'examples': [dedent("""\
+ zypper:
+ repos:
+ - id: opensuse-oss
+ name: os-oss
+ baseurl: http://dl.opensuse.org/dist/leap/v/repo/oss/
+ enabled: 1
+ autorefresh: 1
+ - id: opensuse-oss-update
+ name: os-oss-up
+ baseurl: http://dl.opensuse.org/dist/leap/v/update
+ # any setting per
+ # https://en.opensuse.org/openSUSE:Standards_RepoInfo
+ # enable and autorefresh are on by default
+ config:
+ reposdir: /etc/zypp/repos.dir
+ servicesdir: /etc/zypp/services.d
+ download.use_deltarpm: true
+ # any setting in /etc/zypp/zypp.conf
+ """)],
+ 'frequency': PER_ALWAYS,
+ 'type': 'object',
+ 'properties': {
+ 'zypper': {
+ 'type': 'object',
+ 'properties': {
+ 'repos': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'id': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The unique id of the repo, used when
+ writing
+ /etc/zypp/repos.d/<id>.repo.""")
+ },
+ 'baseurl': {
+ 'type': 'string',
+ 'format': 'uri', # built-in format type
+                                'description': 'The base repository URL'
+ }
+ },
+ 'required': ['id', 'baseurl'],
+ 'additionalProperties': True
+ },
+ 'minItems': 1
+ },
+ 'config': {
+ 'type': 'object',
+ 'description': dedent("""\
+                        Any supported zypp.conf key is written to
+                        /etc/zypp/zypp.conf.""")
+ }
+ },
+ 'required': [],
+ 'minProperties': 1, # Either config or repo must be provided
+ 'additionalProperties': False, # only repos and config allowed
+ }
+ }
+}
+
+__doc__ = get_schema_doc(schema) # Supplement python help()
+
+LOG = logging.getLogger(__name__)
+
+
+def _canonicalize_id(repo_id):
+ repo_id = repo_id.replace(" ", "_")
+ return repo_id
+
+
+def _format_repo_value(val):
+ if isinstance(val, bool):
+ # zypp prefers 1/0
+ return 1 if val else 0
+ if isinstance(val, (list, tuple)):
+ return "\n ".join([_format_repo_value(v) for v in val])
+ if not isinstance(val, string_types):
+ return str(val)
+ return val
+
+
+def _format_repository_config(repo_id, repo_config):
+ to_be = configobj.ConfigObj()
+ to_be[repo_id] = {}
+ # Do basic translation of the items -> values
+ for (k, v) in repo_config.items():
+ # For now assume that people using this know the format
+ # of zypper repos and don't verify keys/values further
+ to_be[repo_id][k] = _format_repo_value(v)
+ lines = to_be.write()
+ return "\n".join(lines)
+
+
+def _write_repos(repos, repo_base_path):
+ """Write the user-provided repo definition files
+ @param repos: A list of repo dictionary objects provided by the user's
+ cloud config.
+ @param repo_base_path: The directory path to which repo definitions are
+ written.
+ """
+
+ if not repos:
+ return
+ valid_repos = {}
+ for index, user_repo_config in enumerate(repos):
+ # Skip on absent required keys
+ missing_keys = set(['id', 'baseurl']).difference(set(user_repo_config))
+ if missing_keys:
+ LOG.warning(
+ "Repo config at index %d is missing required config keys: %s",
+ index, ",".join(missing_keys))
+ continue
+ repo_id = user_repo_config.get('id')
+ canon_repo_id = _canonicalize_id(repo_id)
+ repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id))
+ if os.path.exists(repo_fn_pth):
+ LOG.info("Skipping repo %s, file %s already exists!",
+ repo_id, repo_fn_pth)
+ continue
+ elif repo_id in valid_repos:
+ LOG.info("Skipping repo %s, file %s already pending!",
+ repo_id, repo_fn_pth)
+ continue
+
+ # Do some basic key formatting
+ repo_config = dict(
+ (k.lower().strip().replace("-", "_"), v)
+ for k, v in user_repo_config.items()
+ if k and k != 'id')
+
+ # Set defaults if not present
+ for field in ['enabled', 'autorefresh']:
+ if field not in repo_config:
+ repo_config[field] = '1'
+
+ valid_repos[repo_id] = (repo_fn_pth, repo_config)
+
+ for (repo_id, repo_data) in valid_repos.items():
+ repo_blob = _format_repository_config(repo_id, repo_data[-1])
+ util.write_file(repo_data[0], repo_blob)
+
+
+def _write_zypp_config(zypper_config):
+ """Write to the default zypp configuration file /etc/zypp/zypp.conf"""
+ if not zypper_config:
+ return
+ zypp_config = '/etc/zypp/zypp.conf'
+ zypp_conf_content = util.load_file(zypp_config)
+ new_settings = ['# Added via cloud.cfg']
+ for setting, value in zypper_config.items():
+ if setting == 'configdir':
+ msg = 'Changing the location of the zypper configuration is '
+ msg += 'not supported, skipping "configdir" setting'
+ LOG.warning(msg)
+ continue
+ if value:
+ new_settings.append('%s=%s' % (setting, value))
+ if len(new_settings) > 1:
+ new_config = zypp_conf_content + '\n'.join(new_settings)
+ else:
+ new_config = zypp_conf_content
+ util.write_file(zypp_config, new_config)
+
+
+def handle(name, cfg, _cloud, log, _args):
+ zypper_section = cfg.get('zypper')
+ if not zypper_section:
+ LOG.debug(("Skipping module named %s,"
+ " no 'zypper' relevant configuration found"), name)
+ return
+ repos = zypper_section.get('repos')
+ if not repos:
+ LOG.debug(("Skipping module named %s,"
+ " no 'repos' configuration found"), name)
+ return
+ zypper_config = zypper_section.get('config', {})
+ repo_base_path = zypper_config.get('reposdir', '/etc/zypp/repos.d/')
+
+ _write_zypp_config(zypper_config)
+ _write_repos(repos, repo_base_path)
+
+# vi: ts=4 expandtab
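To illustrate what the new repo writer emits, a sketch using two of the helpers added above; the repo data is hypothetical and the rendered INI is approximate:

    from cloudinit.config.cc_zypper_add_repo import (
        _canonicalize_id, _format_repository_config)

    repo_cfg = {
        'name': 'os-oss',
        'baseurl': 'http://dl.opensuse.org/dist/leap/v/repo/oss/',
        'enabled': 1,
        'autorefresh': 1,
    }
    repo_id = _canonicalize_id('opensuse oss')  # spaces become underscores
    print(_format_repository_config(repo_id, repo_cfg))
    # Roughly:
    # [opensuse_oss]
    # name = os-oss
    # baseurl = http://dl.opensuse.org/dist/leap/v/repo/oss/
    # enabled = 1
    # autorefresh = 1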
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 6400f005..bb291ff8 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -3,19 +3,24 @@
from __future__ import print_function
-from cloudinit.util import read_file_or_url
+from cloudinit import importer
+from cloudinit.util import find_modules, read_file_or_url
import argparse
+from collections import defaultdict
+from copy import deepcopy
import logging
import os
+import re
import sys
import yaml
+_YAML_MAP = {True: 'true', False: 'false', None: 'null'}
SCHEMA_UNDEFINED = b'UNDEFINED'
CLOUD_CONFIG_HEADER = b'#cloud-config'
SCHEMA_DOC_TMPL = """
{name}
----
+{title_underbar}
**Summary:** {title}
{description}
@@ -31,6 +36,8 @@ SCHEMA_DOC_TMPL = """
{examples}
"""
SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}'
+SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n'
+SCHEMA_EXAMPLES_SPACER_TEMPLATE = '\n # --- Example{0} ---'
class SchemaValidationError(ValueError):
@@ -83,11 +90,49 @@ def validate_cloudconfig_schema(config, schema, strict=False):
logging.warning('Invalid config:\n%s', '\n'.join(messages))
-def validate_cloudconfig_file(config_path, schema):
+def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
+ """Return contents of the cloud-config file annotated with schema errors.
+
+ @param cloudconfig: YAML-loaded object from the original_content.
+ @param original_content: The contents of a cloud-config file
+ @param schema_errors: List of tuples from a JSONSchemaValidationError. The
+ tuples consist of (schemapath, error_message).
+ """
+ if not schema_errors:
+ return original_content
+ schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content)
+ errors_by_line = defaultdict(list)
+ error_count = 1
+ error_footer = []
+ annotated_content = []
+ for path, msg in schema_errors:
+ errors_by_line[schemapaths[path]].append(msg)
+ error_footer.append('# E{0}: {1}'.format(error_count, msg))
+ error_count += 1
+ lines = original_content.decode().split('\n')
+ error_count = 1
+ for line_number, line in enumerate(lines):
+ errors = errors_by_line[line_number + 1]
+ if errors:
+ error_label = ','.join(
+ ['E{0}'.format(count + error_count)
+ for count in range(0, len(errors))])
+ error_count += len(errors)
+ annotated_content.append(line + '\t\t# ' + error_label)
+ else:
+ annotated_content.append(line)
+ annotated_content.append(
+ '# Errors: -------------\n{0}\n\n'.format('\n'.join(error_footer)))
+ return '\n'.join(annotated_content)
+
+
+def validate_cloudconfig_file(config_path, schema, annotate=False):
"""Validate cloudconfig file adheres to a specific jsonschema.
@param config_path: Path to the yaml cloud-config file to parse.
@param schema: Dict describing a valid jsonschema to validate against.
+ @param annotate: Boolean set True to print original config file with error
+ annotations on the offending lines.
@raises SchemaValidationError containing any of schema_errors encountered.
@raises RuntimeError when config_path does not exist.
@@ -108,18 +153,83 @@ def validate_cloudconfig_file(config_path, schema):
('format', 'File {0} is not valid yaml. {1}'.format(
config_path, str(e))),)
raise SchemaValidationError(errors)
- validate_cloudconfig_schema(
- cloudconfig, schema, strict=True)
+
+ try:
+ validate_cloudconfig_schema(
+ cloudconfig, schema, strict=True)
+ except SchemaValidationError as e:
+ if annotate:
+ print(annotated_cloudconfig_file(
+ cloudconfig, content, e.schema_errors))
+ raise
+
+
+def _schemapath_for_cloudconfig(config, original_content):
+ """Return a dictionary mapping schemapath to original_content line number.
+
+ @param config: The yaml.loaded config dictionary of a cloud-config file.
+ @param original_content: The simple file content of the cloud-config file
+ """
+ # FIXME Doesn't handle multi-line lists or multi-line strings
+ content_lines = original_content.decode().split('\n')
+ schema_line_numbers = {}
+ list_index = 0
+ RE_YAML_INDENT = r'^(\s*)'
+ scopes = []
+ for line_number, line in enumerate(content_lines):
+ indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
+ line = line.strip()
+ if not line or line.startswith('#'):
+ continue
+ if scopes:
+ previous_depth, path_prefix = scopes[-1]
+ else:
+ previous_depth = -1
+ path_prefix = ''
+ if line.startswith('- '):
+ key = str(list_index)
+ value = line[1:]
+ list_index += 1
+ else:
+ list_index = 0
+ key, value = line.split(':', 1)
+ while indent_depth <= previous_depth:
+ if scopes:
+ previous_depth, path_prefix = scopes.pop()
+ else:
+ previous_depth = -1
+ path_prefix = ''
+ if path_prefix:
+ key = path_prefix + '.' + key
+ scopes.append((indent_depth, key))
+ if value:
+ value = value.strip()
+ if value.startswith('['):
+ scopes.append((indent_depth + 2, key + '.0'))
+ for inner_list_index in range(0, len(yaml.safe_load(value))):
+ list_key = key + '.' + str(inner_list_index)
+ schema_line_numbers[list_key] = line_number + 1
+ schema_line_numbers[key] = line_number + 1
+ return schema_line_numbers
def _get_property_type(property_dict):
"""Return a string representing a property type from a given jsonschema."""
property_type = property_dict.get('type', SCHEMA_UNDEFINED)
+ if property_type == SCHEMA_UNDEFINED and property_dict.get('enum'):
+ property_type = [
+ str(_YAML_MAP.get(k, k)) for k in property_dict['enum']]
if isinstance(property_type, list):
property_type = '/'.join(property_type)
- item_type = property_dict.get('items', {}).get('type')
- if item_type:
- property_type = '{0} of {1}'.format(property_type, item_type)
+ items = property_dict.get('items', {})
+ sub_property_type = items.get('type', '')
+ # Collect each item type
+ for sub_item in items.get('oneOf', {}):
+ if sub_property_type:
+ sub_property_type += '/'
+ sub_property_type += '(' + _get_property_type(sub_item) + ')'
+ if sub_property_type:
+ return '{0} of {1}'.format(property_type, sub_property_type)
return property_type
@@ -146,12 +256,14 @@ def _get_schema_examples(schema, prefix=''):
examples = schema.get('examples')
if not examples:
return ''
- rst_content = '\n**Examples**::\n\n'
- for example in examples:
- example_yaml = yaml.dump(example, default_flow_style=False)
+ rst_content = SCHEMA_EXAMPLES_HEADER
+ for count, example in enumerate(examples):
# Python2.6 is missing textwrapper.indent
- lines = example_yaml.split('\n')
+ lines = example.split('\n')
indented_lines = [' {0}'.format(line) for line in lines]
+ if rst_content != SCHEMA_EXAMPLES_HEADER:
+ indented_lines.insert(
+ 0, SCHEMA_EXAMPLES_SPACER_TEMPLATE.format(count + 1))
rst_content += '\n'.join(indented_lines)
return rst_content
@@ -162,61 +274,87 @@ def get_schema_doc(schema):
@param schema: Dict of jsonschema to render.
@raise KeyError: If schema lacks an expected key.
"""
- schema['property_doc'] = _get_property_doc(schema)
- schema['examples'] = _get_schema_examples(schema)
- schema['distros'] = ', '.join(schema['distros'])
- return SCHEMA_DOC_TMPL.format(**schema)
-
-
-def get_schema(section_key=None):
- """Return a dict of jsonschema defined in any cc_* module.
-
- @param: section_key: Optionally limit schema to a specific top-level key.
- """
- # TODO use util.find_modules in subsequent branch
- from cloudinit.config.cc_ntp import schema
- return schema
+ schema_copy = deepcopy(schema)
+ schema_copy['property_doc'] = _get_property_doc(schema)
+ schema_copy['examples'] = _get_schema_examples(schema)
+ schema_copy['distros'] = ', '.join(schema['distros'])
+ # Need an underbar of the same length as the name
+ schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name'])
+ return SCHEMA_DOC_TMPL.format(**schema_copy)
+
+
+FULL_SCHEMA = None
+
+
+def get_schema():
+ """Return jsonschema coalesced from all cc_* cloud-config module."""
+ global FULL_SCHEMA
+ if FULL_SCHEMA:
+ return FULL_SCHEMA
+ full_schema = {
+ '$schema': 'http://json-schema.org/draft-04/schema#',
+ 'id': 'cloud-config-schema', 'allOf': []}
+
+ configs_dir = os.path.dirname(os.path.abspath(__file__))
+ potential_handlers = find_modules(configs_dir)
+ for (fname, mod_name) in potential_handlers.items():
+ mod_locs, looked_locs = importer.find_module(
+ mod_name, ['cloudinit.config'], ['schema'])
+ if mod_locs:
+ mod = importer.import_module(mod_locs[0])
+ full_schema['allOf'].append(mod.schema)
+ FULL_SCHEMA = full_schema
+ return full_schema
def error(message):
print(message, file=sys.stderr)
- return 1
+ sys.exit(1)
-def get_parser():
+def get_parser(parser=None):
"""Return a parser for supported cmdline arguments."""
- parser = argparse.ArgumentParser()
+ if not parser:
+ parser = argparse.ArgumentParser(
+ prog='cloudconfig-schema',
+ description='Validate cloud-config files or document schema')
parser.add_argument('-c', '--config-file',
help='Path of the cloud-config yaml file to validate')
parser.add_argument('-d', '--doc', action="store_true", default=False,
help='Print schema documentation')
- parser.add_argument('-k', '--key',
- help='Limit validation or docs to a section key')
+ parser.add_argument('--annotate', action="store_true", default=False,
+ help='Annotate existing cloud-config file with errors')
return parser
-def main():
- """Tool to validate schema of a cloud-config file or print schema docs."""
- parser = get_parser()
- args = parser.parse_args()
+def handle_schema_args(name, args):
+ """Handle provided schema args and perform the appropriate actions."""
exclusive_args = [args.config_file, args.doc]
if not any(exclusive_args) or all(exclusive_args):
- return error('Expected either --config-file argument or --doc')
-
- schema = get_schema()
+ error('Expected either --config-file argument or --doc')
+ full_schema = get_schema()
if args.config_file:
try:
- validate_cloudconfig_file(args.config_file, schema)
+ validate_cloudconfig_file(
+ args.config_file, full_schema, args.annotate)
except (SchemaValidationError, RuntimeError) as e:
- return error(str(e))
- print("Valid cloud-config file {0}".format(args.config_file))
+ if not args.annotate:
+ error(str(e))
+ else:
+ print("Valid cloud-config file {0}".format(args.config_file))
if args.doc:
- print(get_schema_doc(schema))
+ for subschema in full_schema['allOf']:
+ print(get_schema_doc(subschema))
+
+
+def main():
+ """Tool to validate schema of a cloud-config file or print schema docs."""
+ parser = get_parser()
+ handle_schema_args('cloudconfig-schema', parser.parse_args())
return 0
if __name__ == '__main__':
sys.exit(main())
-
# vi: ts=4 expandtab
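Finally, a sketch of driving the reworked validation path programmatically, mirroring what ``handle_schema_args`` does for ``--config-file ... --annotate``; the user-data path is hypothetical:

    from cloudinit.config.schema import (
        SchemaValidationError, get_schema, validate_cloudconfig_file)

    try:
        validate_cloudconfig_file('/tmp/user-data.yaml', get_schema(),
                                  annotate=True)
    except SchemaValidationError as e:
        # With annotate=True the offending lines have already been printed
        # with E<n> markers; the exception still carries the error summary.
        print(str(e))
    except RuntimeError as e:
        print(str(e))  # e.g. the config file does not exist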