summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--HACKING.rst85
-rw-r--r--cloudinit/config/cc_apt_configure.py207
-rw-r--r--cloudinit/config/cc_apt_pipelining.py25
-rw-r--r--cloudinit/config/cc_bootcmd.py28
-rwxr-xr-x[-rw-r--r--]cloudinit/config/cc_byobu.py40
-rw-r--r--cloudinit/config/cc_ca_certs.py32
-rw-r--r--cloudinit/config/cc_chef.py12
-rw-r--r--cloudinit/config/cc_debug.py26
-rw-r--r--cloudinit/config/cc_disable_ec2_metadata.py20
-rw-r--r--cloudinit/config/cc_disk_setup.py105
-rw-r--r--cloudinit/config/cc_emit_upstart.py15
-rw-r--r--cloudinit/config/cc_fan.py49
-rw-r--r--cloudinit/config/cc_final_message.py25
-rw-r--r--cloudinit/config/cc_foo.py16
-rw-r--r--cloudinit/config/cc_growpart.py57
-rw-r--r--cloudinit/config/cc_grub_dpkg.py34
-rw-r--r--cloudinit/config/cc_keys_to_console.py24
-rw-r--r--cloudinit/config/cc_landscape.py49
-rw-r--r--cloudinit/config/cc_locale.py20
-rw-r--r--cloudinit/config/cc_lxd.py173
-rw-r--r--cloudinit/config/cc_mcollective.py41
-rw-r--r--cloudinit/config/cc_migrator.py22
-rw-r--r--cloudinit/config/cc_mounts.py52
-rw-r--r--cloudinit/config/cc_ntp.py32
-rw-r--r--cloudinit/config/cc_package_update_upgrade_install.py35
-rw-r--r--cloudinit/config/cc_phone_home.py34
-rw-r--r--cloudinit/config/cc_power_state_change.py45
-rw-r--r--cloudinit/config/cc_puppet.py45
-rw-r--r--cloudinit/config/cc_resizefs.py26
-rw-r--r--cloudinit/config/cc_resolv_conf.py69
-rw-r--r--cloudinit/config/cc_rh_subscription.py34
-rw-r--r--cloudinit/config/cc_rightscale_userdata.py26
-rw-r--r--cloudinit/config/cc_rsyslog.py251
-rw-r--r--cloudinit/config/cc_runcmd.py32
-rw-r--r--cloudinit/config/cc_salt_minion.py33
-rw-r--r--cloudinit/config/cc_scripts_per_boot.py16
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py16
-rw-r--r--cloudinit/config/cc_scripts_per_once.py16
-rw-r--r--cloudinit/config/cc_scripts_user.py19
-rw-r--r--cloudinit/config/cc_scripts_vendor.py22
-rw-r--r--cloudinit/config/cc_seed_random.py52
-rw-r--r--cloudinit/config/cc_set_hostname.py26
-rwxr-xr-x[-rw-r--r--]cloudinit/config/cc_set_passwords.py56
-rw-r--r--cloudinit/config/cc_snap_config.py184
-rw-r--r--cloudinit/config/cc_snappy.py131
-rw-r--r--cloudinit/config/cc_spacewalk.py25
-rwxr-xr-x[-rw-r--r--]cloudinit/config/cc_ssh.py97
-rwxr-xr-x[-rw-r--r--]cloudinit/config/cc_ssh_authkey_fingerprints.py29
-rwxr-xr-x[-rw-r--r--]cloudinit/config/cc_ssh_import_id.py30
-rw-r--r--cloudinit/config/cc_timezone.py20
-rw-r--r--cloudinit/config/cc_ubuntu_init_switch.py32
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py43
-rw-r--r--cloudinit/config/cc_update_hostname.py25
-rw-r--r--cloudinit/config/cc_users_groups.py84
-rw-r--r--cloudinit/config/cc_write_files.py42
-rw-r--r--cloudinit/config/cc_yum_add_repo.py26
-rwxr-xr-x[-rw-r--r--]cloudinit/distros/__init__.py304
-rw-r--r--cloudinit/distros/rhel.py1
-rwxr-xr-xcloudinit/distros/ug_util.py299
-rw-r--r--cloudinit/net/cmdline.py22
-rw-r--r--cloudinit/settings.py1
-rw-r--r--cloudinit/sources/DataSourceAliYun.py49
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py6
-rw-r--r--cloudinit/sources/DataSourceAzure.py7
-rw-r--r--cloudinit/sources/DataSourceDigitalOcean.py101
-rw-r--r--cloudinit/sources/DataSourceEc2.py18
-rw-r--r--cloudinit/sources/DataSourceMAAS.py12
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py34
-rw-r--r--cloudinit/sources/helpers/azure.py2
-rw-r--r--cloudinit/sources/helpers/digitalocean.py218
-rw-r--r--cloudinit/util.py34
-rw-r--r--config/cloud.cfg7
-rw-r--r--doc/examples/cloud-config-seed-random.txt2
-rw-r--r--doc/examples/cloud-config-user-groups.txt8
-rw-r--r--doc/rtd/conf.py2
-rw-r--r--doc/rtd/index.rst1
-rw-r--r--doc/rtd/topics/datasources.rst48
-rw-r--r--doc/rtd/topics/dir_layout.rst4
-rw-r--r--doc/rtd/topics/examples.rst52
-rw-r--r--doc/rtd/topics/format.rst12
-rw-r--r--doc/rtd/topics/logging.rst175
-rw-r--r--doc/rtd/topics/modules.rst297
-rw-r--r--doc/rtd/topics/moreinfo.rst6
-rw-r--r--doc/sources/altcloud/README.rst8
-rw-r--r--doc/sources/configdrive/README.rst4
-rwxr-xr-xpackages/bddeb1
-rw-r--r--systemd/cloud-init-local.service4
-rw-r--r--systemd/cloud-init.service17
-rw-r--r--sysvinit/gentoo/cloud-init1
-rw-r--r--sysvinit/gentoo/cloud-init-local2
-rw-r--r--test-requirements.txt1
-rw-r--r--tests/unittests/__init__.py9
-rw-r--r--tests/unittests/helpers.py8
-rw-r--r--tests/unittests/test_data.py45
-rw-r--r--tests/unittests/test_datasource/test_aliyun.py148
-rw-r--r--tests/unittests/test_datasource/test_digitalocean.py338
-rw-r--r--tests/unittests/test_datasource/test_opennebula.py23
-rwxr-xr-x[-rw-r--r--]tests/unittests/test_distros/test_user_data_normalize.py70
-rw-r--r--tests/unittests/test_handler/test_handler_apt_conf_v1.py2
-rw-r--r--tests/unittests/test_handler/test_handler_growpart.py4
-rw-r--r--tests/unittests/test_handler/test_handler_lxd.py51
-rw-r--r--tests/unittests/test_handler/test_handler_snappy.py293
-rw-r--r--tests/unittests/test_net.py43
-rw-r--r--tests/unittests/test_util.py42
-rw-r--r--tox.ini52
106 files changed, 4517 insertions, 1182 deletions
diff --git a/.gitignore b/.gitignore
index 77eb9c74..865cac15 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,4 @@ dist
*.pyc
__pycache__
.tox
+.coverage
diff --git a/HACKING.rst b/HACKING.rst
index 63a5bde0..4072d0fd 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -3,28 +3,35 @@ Hacking on cloud-init
=====================
This document describes how to contribute changes to cloud-init.
+It assumes you have a `Launchpad`_ account, and refers to your launchpad user
+as ``LP_USER`` throughout.
Do these things once
--------------------
-* If you have not already, be sure to sign the CCA:
+* To contribute, you must sign the Canonical `contributor license agreement`_
- - `Canonical Contributor Agreement`_
+ If you have already signed it as an individual, your Launchpad user will be listed in the `contributor-agreement-canonical`_ group. Unfortunately there is no easy way to check if an organization or company you are doing work for has signed. If you are unsure or have questions, email `Scott Moser <mailto:scott.moser@canonical.com>`_ or ping smoser in ``#cloud-init`` channel via freenode.
-* Clone the `LaunchPad`_ repository:
+* Clone the upstream `repository`_ on Launchpad::
- git clone YOUR_USERNAME@git.launchpad.net:cloud-init
+ git clone https://git.launchpad.net/cloud-init
cd cloud-init
- If you would prefer a bzr style `git clone lp:cloud-init`, see
- the `Instructions on LaunchPad`_ for more information.
+ There is more information on Launchpad as a git hosting site in
+ `Launchpad git documentation`_.
-* Create a new remote pointing to your personal LaunchPad
- repository::
+* Create a new remote pointing to your personal Launchpad repository
- git remote add YOUR_USERNAME YOUR_USERNAME@git.launchpad.net:~YOUR_USERNAME/cloud-init
+ This is equivalent to 'fork' on github::
-.. _Canonical Contributor Agreement: http://www.canonical.com/contributors
+ git remote add LP_USER git+ssh://LP_USER@git.launchpad.net/~LP_USER/cloud-init
+ git push LP_USER master
+
+.. _repository: https://git.launchpad.net/cloud-init
+.. _contributor license agreement: http://www.canonical.com/contributors
+.. _contributor-agreement-canonical: https://launchpad.net/%7Econtributor-agreement-canonical/+members
+.. _Launchpad git documentation: https://help.launchpad.net/Code/Git
Do these things for each feature or bug
---------------------------------------
@@ -33,40 +40,60 @@ Do these things for each feature or bug
git checkout -b my-topic-branch
-.. _Instructions on launchpad: https://help.launchpad.net/Code/Git
-
* Make and commit your changes (note, you can make multiple commits,
fixes, more commits.)::
git commit
-* Check pep8 and test, and address any issues::
+* Run unit tests and lint/formatting checks with `tox`_::
- make test pep8
+ tox
-* Push your changes to your personal LaunchPad repository::
+* Push your changes to your personal Launchpad repository::
- git push -u YOUR_USERNAME my-topic-branch
+ git push -u LP_USER my-topic-branch
* Use your browser to create a merge request:
- - Open the branch on `LaunchPad`_
+ - Open the branch on Launchpad.
+
+ - You can see a web view of your repository and navigate to the branch at:
+
+ ``https://code.launchpad.net/~LP_USER/cloud-init/``
+
+ - It will typically be at:
+
+ ``https://code.launchpad.net/~LP_USER/cloud-init/+git/cloud-init/+ref/BRANCHNAME``
+
+ for example, here is larsks move-to-git branch: https://code.launchpad.net/~larsks/cloud-init/+git/cloud-init/+ref/feature/move-to-git
+
+ - Click 'Propose for merging'
+ - Select 'lp:cloud-init' as the target repository
+ - Type '``master``' as the Target reference path
+ - Click 'Propose Merge'
+ - On the next page, hit 'Set commit message' and type a git combined git style commit message like::
+
+ Activate the frobnicator.
+
+ The frobnicator was previously inactive and now runs by default.
+ This may save the world some day. Then, list the bugs you fixed
+ as footers with syntax as shown here.
+
+ The commit message should be one summary line of less than
+ 74 characters followed by a blank line, and then one or more
+ paragraphs describing the change and why it was needed.
- - It will typically be at
- ``https://code.launchpad.net/~YOUR_USERNAME/cloud-init/+git/cloud-init/+ref/BRANCHNAME``
- for example
- https://code.launchpad.net/~larsks/cloud-init/+git/cloud-init/+ref/feature/move-to-git
+ This is the message that will be used on the commit when it
+ is squashed and merged into trunk.
- - Click 'Propose for merging`
- - Select ``cloud-init`` as the target repository
- - Select ``master`` as the target reference path
+ LP: #1
-Then, someone on cloud-init-dev (currently `Scott Moser`_ and `Joshua
-Harlow`_) will review your changes and follow up in the merge request.
+Then, someone in the `cloud-init-dev`_ group will review your changes and
+follow up in the merge request.
-Feel free to ping and/or join ``#cloud-init`` on freenode (irc) if you
+Feel free to ping and/or join ``#cloud-init`` on freenode irc if you
have any questions.
+.. _tox: https://tox.readthedocs.io/en/latest/
.. _Launchpad: https://launchpad.net
-.. _Scott Moser: https://launchpad.net/~smoser
-.. _Joshua Harlow: https://launchpad.net/~harlowja
+.. _cloud-init-dev: https://launchpad.net/~cloud-init-dev/+members#active
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index fa9505a7..6145fcd2 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -18,6 +18,213 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Apt Configure
+-------------
+**Summary:** configure apt
+
+This module handles both configuration of apt options and adding source lists.
+There are configuration options such as ``apt_get_wrapper`` and
+``apt_get_command`` that control how cloud-init invokes apt-get.
+These configuration options are handled on a per-distro basis, so consult
+documentation for cloud-init's distro support for instructions on using
+these config options.
+
+.. note::
+ To ensure that apt configuration is valid yaml, any strings containing
+ special characters, especially ``:`` should be quoted.
+
+.. note::
+ For more information about apt configuration, see the
+ ``Additional apt configuration`` example.
+
+**Preserve sources.list:**
+
+By default, cloud-init will generate a new sources list in
+``/etc/apt/sources.list.d`` based on any changes specified in cloud config.
+To disable this behavior and preserve the sources list from the pristine image,
+set ``preserve_sources_list`` to ``true``.
+
+.. note::
+ The ``preserve_sources_list`` option overrides all other config keys that
+ would alter ``sources.list`` or ``sources.list.d``, **except** for
+ additional sources to be added to ``sources.list.d``.
+
+**Disable source suites:**
+
+Entries in the sources list can be disabled using ``disable_suites``, which
+takes a list of suites to be disabled. If the string ``$RELEASE`` is present in
+a suite in the ``disable_suites`` list, it will be replaced with the release
+name. If a suite specified in ``disable_suites`` is not present in
+``sources.list`` it will be ignored. For convenience, several aliases are
+provided for ``disable_suites``:
+
+ - ``updates`` => ``$RELEASE-updates``
+ - ``backports`` => ``$RELEASE-backports``
+ - ``security`` => ``$RELEASE-security``
+ - ``proposed`` => ``$RELEASE-proposed``
+ - ``release`` => ``$RELEASE``
+
+.. note::
+ When a suite is disabled using ``disable_suites``, its entry in
+ ``sources.list`` is not deleted; it is just commented out.
+
+**Configure primary and security mirrors:**
+
+The primary and security archive mirrors can be specified using the ``primary``
+and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys
+take a list of configs, allowing mirrors to be specified on a per-architecture
+basis. Each config is a dictionary which must have an entry for ``arches``,
+specifying which architectures that config entry is for. The keyword
+``default`` applies to any architecture not explicitly listed. The mirror url
+can be specified with the ``url`` key, or a list of mirrors to check can be
+provided in order, with the first mirror that can be resolved being selected.
+This allows the same configuration to be used in different environments, with
+different hosts used for a local apt mirror. If no mirror is provided by uri or
+search, ``search_dns`` may be used to search for dns names in the format
+``<distro>-mirror`` in each of the following:
+
+ - fqdn of this host per cloud metadata
+ - localdomain
+ - domains listed in ``/etc/resolv.conf``
+
+If there is a dns entry for ``<distro>-mirror``, then it is assumed that there
+is a distro mirror at ``http://<distro>-mirror.<domain>/<distro>``. If the
+``primary`` key is defined, but not the ``security`` key, then the
+configuration for ``primary`` is also used for ``security``. If ``search_dns``
+is used for the ``security`` key, the search pattern will be
+``<distro>-security-mirror``.
+
+If no mirrors are specified, or all lookups fail, then default mirrors defined
+in the datasource are used. If none are present in the datasource either the
+following defaults are used:
+
+ - primary: ``http://archive.ubuntu.com/ubuntu``
+ - security: ``http://security.ubuntu.com/ubuntu``
+
+**Specify sources.list template:**
+
+A custom template for rendering ``sources.list`` can be specified with
+``sources_list``. If no ``sources_list`` template is given, cloud-init will
+use a sane default. Within this template, the following strings will be replaced
+with the appropriate values:
+
+ - ``$MIRROR``
+ - ``$RELEASE``
+ - ``$PRIMARY``
+ - ``$SECURITY``
+
+**Pass configuration to apt:**
+
+Apt configuration can be specified using ``conf``. Configuration is specified
+as a string. For multiline apt configuration, make sure to follow yaml syntax.
+
+**Configure apt proxy:**
+
+Proxy configuration for apt can be specified using ``conf``, but proxy config
+keys also exist for convenience. The proxy config keys, ``http_proxy``,
+``ftp_proxy``, and ``https_proxy`` may be used to specify a proxy for http, ftp
+and https protocols respectively. The ``proxy`` key also exists as an alias for
+``http_proxy``. Proxy url is specified in the format
+``<protocol>://[[user][:pass]@]host[:port]/``.
+
+**Add apt repos by regex:**
+
+All source entries in ``apt-sources`` that match regex in
+``add_apt_repo_match`` will be added to the system using
+``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
+to ``^[\w-]+:\w``
+
+**Add source list entries:**
+
+Source list entries can be specified as a dictionary under the ``sources``
+config key, with each key in the dict representing a different source file.
+The key of each source entry will be used as an id that can be referenced in
+other config entries, as well as the filename for the source's configuration
+under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``,
+it will be appended. If there is no configuration for a key in ``sources``, no
+file will be written, but the key may still be referred to as an id in other
+``sources`` entries.
+
+Each entry under ``sources`` is a dictionary which may contain any of the
+following optional keys:
+
+ - ``source``: a sources.list entry (some variable replacements apply)
+ - ``keyid``: a key to import via shortid or fingerprint
+ - ``key``: a raw PGP key
+ - ``keyserver``: alternate keyserver to pull ``keyid`` key from
+
+The ``source`` key supports variable replacements for the following strings:
+
+ - ``$MIRROR``
+ - ``$PRIMARY``
+ - ``$SECURITY``
+ - ``$RELEASE``
+
+**Internal name:** ``cc_apt_configure``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu, debian
+
+**Config keys**::
+
+ apt:
+ preserve_sources_list: <true/false>
+ disable_suites:
+ - $RELEASE-updates
+ - backports
+ - $RELEASE
+ - mysuite
+ primary:
+ - arches:
+ - amd64
+ - i386
+ - default
+ uri: "http://us.archive.ubuntu.com/ubuntu"
+ search:
+ - "http://cool.but-sometimes-unreachable.com/ubuntu"
+ - "http://us.archive.ubuntu.com/ubuntu"
+ search_dns: <true/false>
+ - arches:
+ - s390x
+ - arm64
+ uri: "http://archive-to-use-for-arm64.example.com/ubuntu"
+ security:
+ - arches:
+ - default
+ search_dns: true
+ sources_list: |
+ deb $MIRROR $RELEASE main restricted
+ deb-src $MIRROR $RELEASE main restricted
+ deb $PRIMARY $RELEASE universe restricted
+ deb $SECURITY $RELEASE-security multiverse
+ conf: |
+ APT {
+ Get {
+ Assume-Yes "true";
+ Fix-Broken "true";
+ }
+ }
+ proxy: "http://[[user][:pass]@]host[:port]/"
+ http_proxy: "http://[[user][:pass]@]host[:port]/"
+ ftp_proxy: "ftp://[[user][:pass]@]host[:port]/"
+ https_proxy: "https://[[user][:pass]@]host[:port]/"
+ sources:
+ source1:
+ keyid: "keyid"
+ keyserver: "keyserverurl"
+ source: "deb http://<url>/ xenial main"
+ source2:
+ source: "ppa:<ppa-name>"
+ source3:
+ source: "deb $MIRROR $RELEASE multiverse"
+ key: |
+ ------BEGIN PGP PUBLIC KEY BLOCK-------
+ <key data>
+ ------END PGP PUBLIC KEY BLOCK-------
+"""
+
import glob
import os
import re
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 40c32c84..ab9d0054 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -16,6 +16,31 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Apt Pipelining
+--------------
+**Summary:** configure apt pipelining
+
+This module configures apt's ``Acquire::http::Pipeline-Depth`` option, which
+controls how apt handles HTTP pipelining. It may be useful for pipelining to be
+disabled, because some web servers, such as S3 do not pipeline properly (LP:
+#948461). The ``apt_pipelining`` config key may be set to ``false`` to disable
+pipelining altogether. This is the default behavior. If it is set to ``none``,
+``unchanged``, or ``os``, no change will be made to apt configuration and the
+default setting for the distro will be used. The pipeline depth can also be
+manually specified by setting ``apt_pipelining`` to a number. However, this is
+not recommended.
+
+**Internal name:** ``cc_apt_pipelining``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu, debian
+
+**Config keys**::
+ apt_pipelining: <false/none/unchanged/os/number>
+"""
+
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index b763a3c3..22b23f28 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -18,6 +18,34 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Bootcmd
+-------
+**Summary:** run commands early in boot process
+
+This module runs arbitrary commands very early in the boot process,
+only slightly after a boothook would run. This is very similar to a
+boothook, but more user friendly. The environment variable ``INSTANCE_ID``
+will be set to the current instance id for all run commands. Commands can be
+specified either as lists or strings. For invocation details, see ``runcmd``.
+
+.. note::
+ bootcmd should only be used for things that could not be done later in the
+ boot process.
+
+**Internal name:** ``cc_bootcmd``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
+
+ bootcmd:
+ - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts
+ - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
+"""
+
import os
from cloudinit.settings import PER_ALWAYS
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index ef0ce7ab..4a616e26 100644..100755
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -18,11 +18,39 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
+"""
+Byobu
+-----
+**Summary:** enable/disable byobu system wide and for default user
+This module controls whether byobu is enabled or disabled system wide and for
+the default system user. If byobu is to be enabled, this module will ensure it
+is installed. Likewise, if it is to be disabled, it will be removed if
+installed.
+
+Valid configuration options for this module are:
+
+ - ``enable-system``: enable byobu system wide
+ - ``enable-user``: enable byobu for the default user
+ - ``disable-system``: disable byobu system wide
+ - ``disable-user``: disable byobu for the default user
+ - ``enable``: enable byobu both system wide and for default user
+ - ``disable``: disable byobu for all users
+ - ``user``: alias for ``enable-user``
+ - ``system``: alias for ``enable-system``
+
+**Internal name:** ``cc_byobu``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu, debian
+
+**Config keys**::
+
+ byobu_by_default: <user/system>
+"""
+
+from cloudinit.distros import ug_util
from cloudinit import util
distros = ['ubuntu', 'debian']
@@ -61,8 +89,8 @@ def handle(name, cfg, cloud, log, args):
shcmd = ""
if mod_user:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
+ (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
+ (user, _user_config) = ug_util.extract_default(users)
if not user:
log.warn(("No default byobu user provided, "
"can not launch %s for the default user"), bl_inst)
diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 8248b020..53d14060 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -14,6 +14,38 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+CA Certs
+--------
+**Summary:** add ca certificates
+
+This module adds CA certificates to ``/etc/ca-certificates.conf`` and updates
+the ssl cert cache using ``update-ca-certificates``. The default certificates
+can be removed from the system with the configuration option
+``remove-defaults``.
+
+.. note::
+ certificates must be specified using valid yaml. in order to specify a
+ multiline certificate, the yaml multiline list syntax must be used
+
+**Internal name:** ``cc_ca_certs``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu, debian
+
+**Config keys**::
+
+ ca-certs:
+ remove-defaults: <true/false>
+ trusted:
+ - <single line cert>
+ - |
+ -----BEGIN CERTIFICATE-----
+ YOUR-ORGS-TRUSTED-CA-CERT-HERE
+ -----END CERTIFICATE-----
+"""
+
import os
from cloudinit import util
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 4c28be6a..922fb6af 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -19,9 +19,11 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+Chef
+----
**Summary:** module that configures, starts and installs chef.
-**Description:** This module enables chef to be installed (from packages or
+This module enables chef to be installed (from packages or
from gems, or from omnibus). Before this occurs chef configurations are
written to disk (validation.pem, client.pem, firstboot.json, client.rb),
and needed chef folders/directories are created (/etc/chef and /var/log/chef
@@ -33,7 +35,13 @@ chef will have forked into its own process) then a post run function can
run that can do finishing activities (such as removing the validation pem
file).
-It can be configured with the following option structure::
+**Internal name:** ``cc_chef``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
chef:
directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index bdc32fe6..5ab36469 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -15,22 +15,28 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+Debug
+-----
**Summary:** helper to debug cloud-init *internal* datastructures.
-**Description:** This module will enable for outputting various internal
-information that cloud-init sources provide to either a file or to the output
-console/log location that this cloud-init has been configured with when
-running.
+This module will enable for outputting various internal information that
+cloud-init sources provide to either a file or to the output console/log
+location that this cloud-init has been configured with when running.
-It can be configured with the following option structure::
+.. note::
+ Log configurations are not output.
- debug:
- verbose: (defaulting to true)
- output: (location to write output, defaulting to console + log)
+**Internal name:** ``cc_debug``
-.. note::
+**Module frequency:** per instance
- Log configurations are not output.
+**Supported distros:** all
+
+**Config keys**::
+
+ debug:
+ verbose: true/false (defaulting to true)
+ output: (location to write output, defaulting to console + log)
"""
import copy
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 3fd2c20f..5c54e6f4 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -18,6 +18,26 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Disable EC2 Metadata
+--------------------
+**Summary:** disable aws ec2 metadata
+
+This module can disable the ec2 datasource by rejecting the route to
+``169.254.169.254``, the usual route to the datasource. This module is disabled
+by default.
+
+**Internal name:** ``cc_disable_ec2_metadata``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
+
+ disable_ec2_metadata: <true/false>
+"""
+
from cloudinit import util
from cloudinit.settings import PER_ALWAYS
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index b642f1f8..0c4b794d 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -16,6 +16,96 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Disk Setup
+----------
+**Summary:** configure partitions and filesystems
+
+This module is able to configure simple partition tables and filesystems.
+
+.. note::
+ for more detail about configuration options for disk setup, see the disk
+ setup example
+
+For convenience, aliases can be specified for disks using the
+``device_aliases`` config key, which takes a dictionary of alias: path
+mappings. There are automatic aliases for ``swap`` and ``ephemeral<X>``, where
+``swap`` will always refer to the active swap partition and ``ephemeral<X>``
+will refer to the block device of the ephemeral image.
+
+Disk partitioning is done using the ``disk_setup`` directive. This config
+directive accepts a dictionary where each key is either a path to a block
+device or an alias specified in ``device_aliases``, and each value is the
+configuration options for the device. The ``table_type`` option specifies the
+partition table type, either ``mbr`` or ``gpt``. The ``layout`` option
+specifies how partitions on the device are to be arranged. If ``layout`` is set
+to ``true``, a single partition using all the space on the device will be
+created. If set to ``false``, no partitions will be created. Partitions can be
+specified by providing a list to ``layout``, where each entry in the list is
+either a size or a list containing a size and the numerical value for a
+partition type. The size for partitions is specified in **percentage** of disk
+space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space).
+The ``overwrite`` option controls whether this module tries to be safe about
+writing partition tables or not. If ``overwrite: false`` is set, the device
+will be checked for a partition table and for a file system and if either is
+found, the operation will be skipped. If ``overwrite: true`` is set, no checks
+will be performed.
+
+.. note::
+ Using ``overwrite: true`` is dangerous and can lead to data loss, so double
+ check that the correct device has been specified if using this option.
+
+File system configuration is done using the ``fs_setup`` directive. This config
+directive accepts a list of filesystem configs. The device to create the
+filesystem on may be specified either as a path or as an alias in the format
+``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device.
+The partition can also be specified by setting ``partition`` to the desired
+partition number. The ``partition`` option may also be set to ``auto``, in
+which this module will search for the existence of a filesystem matching the
+``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip
+creating the filesystem if one is found. The ``partition`` option may also be
+set to ``any``, in which case any file system that matches ``type`` and
+``device`` will cause this module to skip filesystem creation for the
+``fs_setup`` entry, regardless of ``label`` matching or not. To write a
+filesystem directly to a device, use ``partition: none``. A label can be
+specified for the filesystem using ``label``, and the filesystem type can be
+specified using ``filesystem``.
+
+.. note::
+ If specifying device using the ``<device name>.<partition number>`` format,
+ the value of ``partition`` will be overwritten.
+
+.. note::
+ Using ``overwrite: true`` for filesystems is dangerous and can lead to data
+ loss, so double check the entry in ``fs_setup``.
+
+**Internal name:** ``cc_disk_setup``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ device_aliases:
+ <alias name>: <device path>
+ disk_setup:
+ <alias name/path>:
+ table_type: <'mbr'/'gpt'>
+ layout:
+ - [33,82]
+ - 66
+ overwrite: <true/false>
+ fs_setup:
+ - label: <label>
+ filesystem: <filesystem type>
+ device: <device>
+ partition: <"auto"/"any"/"none"/<partition number>>
+ overwrite: <true/false>
+ replace_fs: <filesystem type>
+"""
+
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
import logging
@@ -33,12 +123,14 @@ BLKID_CMD = util.which("blkid")
BLKDEV_CMD = util.which("blockdev")
WIPEFS_CMD = util.which("wipefs")
+LANG_C_ENV = {'LANG': 'C'}
+
LOG = logging.getLogger(__name__)
def handle(_name, cfg, cloud, log, _args):
"""
- See doc/examples/cloud-config_disk-setup.txt for documentation on the
+ See doc/examples/cloud-config-disk-setup.txt for documentation on the
format.
"""
disk_setup = cfg.get("disk_setup")
@@ -355,8 +447,11 @@ def get_mbr_hdd_size(device):
def get_gpt_hdd_size(device):
- out, _ = util.subp([SGDISK_CMD, '-p', device])
- return out.splitlines()[0].split()[2]
+ out, _ = util.subp([SGDISK_CMD, '-p', device], update_env=LANG_C_ENV)
+ for line in out.splitlines():
+ if line.startswith("Disk"):
+ return line.split()[2]
+ raise Exception("Failed to get %s size from sgdisk" % (device))
def get_hdd_size(table_type, device):
@@ -408,7 +503,7 @@ def check_partition_mbr_layout(device, layout):
def check_partition_gpt_layout(device, layout):
prt_cmd = [SGDISK_CMD, '-p', device]
try:
- out, _err = util.subp(prt_cmd)
+ out, _err = util.subp(prt_cmd, update_env=LANG_C_ENV)
except Exception as e:
raise Exception("Error running partition command on %s\n%s" % (
device, e))
@@ -621,6 +716,8 @@ def exec_mkpart_gpt(device, layout):
LOG.warn("Failed to partition device %s" % device)
raise
+ read_parttbl(device)
+
def exec_mkpart(table_type, device, layout):
"""
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index 98828b9e..a7be6351 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -18,6 +18,21 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Emit Upstart
+------------
+**Summary:** emit upstart configuration
+
+Emit upstart configuration for cloud-init modules on upstart based systems. No
+user configuration should be required.
+
+**Internal name:** ``cc_emit_upstart``
+
+**Module frequency:** per always
+
+**Supported distros:** ubuntu, debian
+"""
+
import os
from cloudinit import log as logging
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index 545fee22..6027fdc7 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -15,25 +15,38 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
"""
-fan module allows configuration of Ubuntu Fan
- https://wiki.ubuntu.com/FanNetworking
-
-Example config:
- #cloud-config
- fan:
- config: |
- # fan 240
- 10.0.0.0/8 eth0/16 dhcp
- 10.0.0.0/8 eth1/16 dhcp off
- # fan 241
- 241.0.0.0/8 eth0/16 dhcp
- config_path: /etc/network/fan
-
-If cloud-init sees a 'fan' entry in cloud-config it will
- a.) write 'config_path' with the contents
- b.) install the package 'ubuntu-fan' if it is not installed
- c.) ensure the service is started (or restarted if was previously running)
+Fan
+---
+**Summary:** configure ubuntu fan networking
+
+This module installs, configures and starts the ubuntu fan network system. For
+more information about Ubuntu Fan, see:
+``https://wiki.ubuntu.com/FanNetworking``.
+
+If cloud-init sees a ``fan`` entry in cloud-config it will:
+
+ - write ``config_path`` with the contents of the ``config`` key
+ - install the package ``ubuntu-fan`` if it is not installed
+ - ensure the service is started (or restarted if was previously running)
+
+**Internal name:** ``cc_fan``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu
+
+**Config keys**::
+
+ fan:
+ config: |
+ # fan 240
+ 10.0.0.0/8 eth0/16 dhcp
+ 10.0.0.0/8 eth1/16 dhcp off
+ # fan 241
+ 241.0.0.0/8 eth0/16 dhcp
+ config_path: /etc/network/fan
"""
from cloudinit import log as logging
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index c9021eb1..5e144fde 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -18,6 +18,31 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Final Message
+-------------
+**Summary:** output final message when cloud-init has finished
+
+This module configures the final message that cloud-init writes. The message is
+specified as a jinja template with the following variables set:
+
+ - ``version``: cloud-init version
+ - ``timestamp``: time at cloud-init finish
+ - ``datasource``: cloud-init data source
+ - ``uptime``: system uptime
+
+**Internal name:** ``cc_final_message``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
+
+ final_message: <message>
+
+"""
+
from cloudinit import templater
from cloudinit import util
from cloudinit import version
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
index 95aab4dd..ad0e0468 100644
--- a/cloudinit/config/cc_foo.py
+++ b/cloudinit/config/cc_foo.py
@@ -18,6 +18,20 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Foo
+---
+**Summary:** example module
+
+Example to show module structure. Does not do anything.
+
+**Internal name:** ``cc_foo``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+"""
+
from cloudinit.settings import PER_INSTANCE
# Modules are expected to have the following attributes.
@@ -35,7 +49,7 @@ from cloudinit.settings import PER_INSTANCE
# Typically those are from module configuration where the module
# is defined with some extra configuration that will eventually
# be translated from yaml into arguments to this module.
-# 2. A optional 'frequency' that defines how often this module should be ran.
+# 2. A optional 'frequency' that defines how often this module should be run.
# Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not
# provided PER_INSTANCE will be assumed.
# See settings.py for these constants.
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 40560f11..a95e6c81 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -18,6 +18,63 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Growpart
+--------
+**Summary:** grow partitions
+
+Growpart resizes partitions to fill the available disk space.
+This is useful for cloud instances with a larger amount of disk space available
+than the pristine image uses, as it allows the instance to automatically make
+use of the extra space.
+
+The devices run growpart on are specified as a list under the ``devices`` key.
+Each entry in the devices list can be either the path to the device's
+mountpoint in the filesystem or a path to the block device in ``/dev``.
+
+The utility to use for resizing can be selected using the ``mode`` config key.
+If ``mode`` key is set to ``auto``, then any available utility (either
+``growpart`` or ``gpart``) will be used. If neither utility is available, no
+error will be raised. If ``mode`` is set to ``growpart``, then the ``growpart``
+utility will be used. If this utility is not available on the system, this will
+result in an error. If ``mode`` is set to ``off`` or ``false``, then
+``cc_growpart`` will take no action.
+
+There is some functionality overlap between this module and the ``growroot``
+functionality of ``cloud-initramfs-tools``. However, there are some situations
+where one tool is able to function and the other is not. The default
+configuration for both should work for most cloud instances. To explicitly
+prevent ``cloud-initramfs-tools`` from running ``growroot``, the file
+``/etc/growroot-disabled`` can be created. By default, both ``growroot`` and
+``cc_growpart`` will check for the existence of this file and will not run if
+it is present. However, this file can be ignored for ``cc_growpart`` by setting
+``ignore_growroot_disabled`` to ``true``. For more information on
+``cloud-initramfs-tools`` see: https://launchpad.net/cloud-initramfs-tools
+
+Growpart is enabled by default on the root partition. The default config for
+growpart is::
+
+ growpart:
+ mode: auto
+ devices: ["/"]
+ ignore_growroot_disabled: false
+
+**Internal name:** ``cc_growpart``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
+
+ growpart:
+ mode: <auto/growpart/off/false>
+ devices:
+ - "/"
+ - "/dev/vdb1"
+ ignore_growroot_disabled: <true/false>
+"""
+
import os
import os.path
import re
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index 156722d9..33ca40a1 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -18,6 +18,40 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Grub Dpkg
+---------
+**Summary:** configure grub debconf installation device
+
+Configure which device is used as the target for grub installation. This module
+should work correctly by default without any user configuration. It can be
+enabled/disabled using the ``enabled`` config key in the ``grub_dpkg`` config
+dict. The global config key ``grub-dpkg`` is an alias for ``grub_dpkg``. If no
+installation device is specified this module will look for the first existing
+device in:
+
+ - ``/dev/sda``
+ - ``/dev/vda``
+ - ``/dev/xvda``
+ - ``/dev/sda1``
+ - ``/dev/vda1``
+ - ``/dev/xvda1``
+
+**Internal name:** ``cc_grub_dpkg``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu, debian
+
+**Config keys**::
+
+ grub_dpkg:
+ enabled: <true/false>
+ grub-pc/install_devices: <devices>
+ grub-pc/install_devices_empty: <devices>
+ grub-dpkg: (alias for grub_dpkg)
+"""
+
import os
from cloudinit import util
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 9a02f056..d4b2013e 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -18,6 +18,30 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Keys to Console
+---------------
+**Summary:** control which ssh keys may be written to console
+
+For security reasons it may be desirable not to write ssh fingerprints and keys
+to the console. To avoid the fingerprint of types of ssh keys being written to
+console the ``ssh_fp_console_blacklist`` config key can be used. By default all
+types of keys will have their fingerprints written to console. To avoid keys
+of a key type being written to console the ``ssh_key_console_blacklist`` config
+key can be used. By default ``ssh-dss`` keys are not written to console.
+
+**Internal name:** ``cc_keys_to_console``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ ssh_fp_console_blacklist: <list of key types>
+ ssh_key_console_blacklist: <list of key types>
+"""
+
import os
from cloudinit.settings import PER_INSTANCE
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 68fcb27f..11c84513 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -18,6 +18,55 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Landscape
+---------
+**Summary:** install and configure landscape client
+
+This module installs and configures ``landscape-client``. The landscape client
+will only be installed if the key ``landscape`` is present in config. Landscape
+client configuration is given under the ``client`` key under the main
+``landscape`` config key. The config parameters are not interpreted by
+cloud-init, but rather are converted into a ConfigObj formatted file and
+written out to ``/etc/landscape/client.conf``.
+
+The following default client config is provided, but can be overridden::
+
+ landscape:
+ client:
+ log_level: "info"
+ url: "https://landscape.canonical.com/message-system"
+ ping_url: "http://landscape.canonical.com/ping"
+ data_path: "/var/lib/landscape/client"
+
+.. note::
+ see landscape documentation for client config keys
+
+.. note::
+ if ``tags`` is defined, its contents should be a string delimited with
+ ``,`` rather than a list
+
+**Internal name:** ``cc_landscape``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu
+
+**Config keys**::
+
+ landscape:
+ client:
+ url: "https://landscape.canonical.com/message-system"
+ ping_url: "http://landscape.canonical.com/ping"
+ data_path: "/var/lib/landscape/client"
+ http_proxy: "http://my.proxy.com/foobar"
+ https_proxy: "https://my.proxy.com/foobar"
+ tags: "server,cloud"
+ computer_title: "footitle"
+ registration_key: "fookey"
+ account_name: "fooaccount"
+"""
+
import os
from six import StringIO
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index bbe5fcae..268888e2 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -18,6 +18,26 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Locale
+------
+**Summary:** set system locale
+
+Configure the system locale and apply it system wide. By default use the locale
+specified by the datasource.
+
+**Internal name:** ``cc_locale``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ locale: <locale str>
+ locale_configfile: <path to locale config file>
+"""
+
from cloudinit import util
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 0086840f..3e7faca7 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -17,35 +17,50 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
-This module initializes lxd using 'lxd init'
-
-Example config:
- #cloud-config
- lxd:
- init:
- network_address: <ip addr>
- network_port: <port>
- storage_backend: <zfs/dir>
- storage_create_device: <dev>
- storage_create_loop: <size>
- storage_pool: <name>
- trust_password: <password>
- bridge:
- mode: <new, existing or none>
- name: <name>
- ipv4_address: <ip addr>
- ipv4_netmask: <cidr>
- ipv4_dhcp_first: <ip addr>
- ipv4_dhcp_last: <ip addr>
- ipv4_dhcp_leases: <size>
- ipv4_nat: <bool>
- ipv6_address: <ip addr>
- ipv6_netmask: <cidr>
- ipv6_nat: <bool>
- domain: <domain>
+LXD
+---
+**Summary:** configure lxd with ``lxd init`` and optionally lxd-bridge
+
+This module configures lxd with user specified options using ``lxd init``.
+If lxd is not present on the system but lxd configuration is provided, then
+lxd will be installed. If the selected storage backend is zfs, then zfs will
+be installed if missing. If network bridge configuration is provided, then
+lxd-bridge will be configured accordingly.
+
+**Internal name:** ``cc_lxd``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu
+
+**Config keys**::
+
+ lxd:
+ init:
+ network_address: <ip addr>
+ network_port: <port>
+ storage_backend: <zfs/dir>
+ storage_create_device: <dev>
+ storage_create_loop: <size>
+ storage_pool: <name>
+ trust_password: <password>
+ bridge:
+ mode: <new, existing or none>
+ name: <name>
+ ipv4_address: <ip addr>
+ ipv4_netmask: <cidr>
+ ipv4_dhcp_first: <ip addr>
+ ipv4_dhcp_last: <ip addr>
+ ipv4_dhcp_leases: <size>
+ ipv4_nat: <bool>
+ ipv6_address: <ip addr>
+ ipv6_netmask: <cidr>
+ ipv6_nat: <bool>
+ domain: <domain>
"""
from cloudinit import util
+import os
distros = ['ubuntu']
@@ -105,25 +120,43 @@ def handle(name, cfg, cloud, log, args):
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
- if bridge_cfg and util.which(dconf_comm):
- debconf = bridge_to_debconf(bridge_cfg)
+ if bridge_cfg:
+ if os.path.exists("/etc/default/lxd-bridge") \
+ and util.which(dconf_comm):
+ # Bridge configured through packaging
+
+ debconf = bridge_to_debconf(bridge_cfg)
+
+ # Update debconf database
+ try:
+ log.debug("Setting lxd debconf via " + dconf_comm)
+ data = "\n".join(["set %s %s" % (k, v)
+ for k, v in debconf.items()]) + "\n"
+ util.subp(['debconf-communicate'], data)
+ except Exception:
+ util.logexc(log, "Failed to run '%s' for lxd with" %
+ dconf_comm)
+
+ # Remove the existing configuration file (forces re-generation)
+ util.del_file("/etc/default/lxd-bridge")
+
+ # Run reconfigure
+ log.debug("Running dpkg-reconfigure for lxd")
+ util.subp(['dpkg-reconfigure', 'lxd',
+ '--frontend=noninteractive'])
+ else:
+ # Built-in LXD bridge support
+ cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
+ if cmd_create:
+ log.debug("Creating lxd bridge: %s" %
+ " ".join(cmd_create))
+ util.subp(cmd_create)
+
+ if cmd_attach:
+ log.debug("Setting up default lxd bridge: %s" %
+ " ".join(cmd_create))
+ util.subp(cmd_attach)
- # Update debconf database
- try:
- log.debug("Setting lxd debconf via " + dconf_comm)
- data = "\n".join(["set %s %s" % (k, v)
- for k, v in debconf.items()]) + "\n"
- util.subp(['debconf-communicate'], data)
- except Exception:
- util.logexc(log, "Failed to run '%s' for lxd with" % dconf_comm)
-
- # Remove the existing configuration file (forces re-generation)
- util.del_file("/etc/default/lxd-bridge")
-
- # Run reconfigure
- log.debug("Running dpkg-reconfigure for lxd")
- util.subp(['dpkg-reconfigure', 'lxd',
- '--frontend=noninteractive'])
elif bridge_cfg:
raise RuntimeError(
"Unable to configure lxd bridge without %s." + dconf_comm)
@@ -177,3 +210,55 @@ def bridge_to_debconf(bridge_cfg):
raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
return debconf
+
+
+def bridge_to_cmd(bridge_cfg):
+ if bridge_cfg.get("mode") == "none":
+ return None, None
+
+ bridge_name = bridge_cfg.get("name", "lxdbr0")
+ cmd_create = []
+ cmd_attach = ["lxc", "network", "attach-profile", bridge_name,
+ "default", "eth0", "--force-local"]
+
+ if bridge_cfg.get("mode") == "existing":
+ return None, cmd_attach
+
+ if bridge_cfg.get("mode") != "new":
+ raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
+
+ cmd_create = ["lxc", "network", "create", bridge_name]
+
+ if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
+ cmd_create.append("ipv4.address=%s/%s" %
+ (bridge_cfg.get("ipv4_address"),
+ bridge_cfg.get("ipv4_netmask")))
+
+ if bridge_cfg.get("ipv4_nat", "true") == "true":
+ cmd_create.append("ipv4.nat=true")
+
+ if bridge_cfg.get("ipv4_dhcp_first") and \
+ bridge_cfg.get("ipv4_dhcp_last"):
+ dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"),
+ bridge_cfg.get("ipv4_dhcp_last"))
+ cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range)
+ else:
+ cmd_create.append("ipv4.address=none")
+
+ if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"):
+ cmd_create.append("ipv6.address=%s/%s" %
+ (bridge_cfg.get("ipv6_address"),
+ bridge_cfg.get("ipv6_netmask")))
+
+ if bridge_cfg.get("ipv6_nat", "false") == "true":
+ cmd_create.append("ipv6.nat=true")
+
+ else:
+ cmd_create.append("ipv6.address=none")
+
+ if bridge_cfg.get("domain"):
+ cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain"))
+
+ cmd_create.append("--force-local")
+
+ return cmd_create, cmd_attach
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index b3089f30..c447f266 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -19,6 +19,47 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Mcollective
+-----------
+**Summary:** install, configure and start mcollective
+
+This module installs, configures and starts mcollective. If the ``mcollective``
+key is present in config, then mcollective will be installed and started.
+
+Configuration for ``mcollective`` can be specified in the ``conf`` key under
+``mcollective``. Each config value consists of a key value pair and will be
+written to ``/etc/mcollective/server.cfg``. The ``public-cert`` and
+``private-cert`` keys, if present in conf may be used to specify the public and
+private certificates for mcollective. Their values will be written to
+``/etc/mcollective/ssl/server-public.pem`` and
+``/etc/mcollective/ssl/server-private.pem``.
+
+.. note::
+ The ec2 metadata service is readable by non-root users.
+ If security is a concern, use include-once and ssl urls.
+
+**Internal name:** ``cc_mcollective``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ mcollective:
+ conf:
+ <key>: <value>
+ public-cert: |
+ -------BEGIN CERTIFICATE--------
+ <cert data>
+ -------END CERTIFICATE--------
+ private-cert: |
+ -------BEGIN CERTIFICATE--------
+ <cert data>
+ -------END CERTIFICATE--------
+"""
+
import errno
import six
diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py
index facaa538..6e0bf4bb 100644
--- a/cloudinit/config/cc_migrator.py
+++ b/cloudinit/config/cc_migrator.py
@@ -16,6 +16,28 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Migrator
+--------
+**Summary:** migrate old versions of cloud-init data to new
+
+This module handles moving old versions of cloud-init data to newer ones.
+Currently, it only handles renaming cloud-init's per-frequency semaphore files
+to canonicalized name and renaming legacy semaphore names to newer ones. This
+module is enabled by default, but can be disabled by specifying ``migrate:
+false`` in config.
+
+**Internal name:** ``cc_migrator``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
+
+ migrate: <true/false>
+"""
+
import os
import shutil
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 2b981935..dfc4b598 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -18,6 +18,54 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Mounts
+------
+**Summary:** configure mount points and swap files
+
+This module can add or remove mountpoints from ``/etc/fstab`` as well as
+configure swap. The ``mounts`` config key takes a list of fstab entries to add.
+Each entry is specified as a list of ``[ fs_spec, fs_file, fs_vfstype,
+fs_mntops, fs-freq, fs_passno ]``. For more information on these options,
+consult the manual for ``/etc/fstab``. When specifying the ``fs_spec``, if the
+device name starts with one of ``xvd``, ``sd``, ``hd``, or ``vd``, the leading
+``/dev`` may be omitted.
+
+In order to remove a previously listed mount, an entry can be added to the
+mounts list containing ``fs_spec`` for the device to be removed but no
+mountpoint (i.e. ``[ sda1 ]`` or ``[ sda1, null ]``).
+
+The ``mount_default_fields`` config key allows default options to be specified
+for the values in a ``mounts`` entry that are not specified, aside from the
+``fs_spec`` and the ``fs_file``. If specified, this must be a list containing 6
+values. It defaults to::
+
+ mount_default_fields: [none, none, "auto", "defaults,nobootwait", "0", "2"]
+
+Swap files can be configured by setting the path to the swap file to create
+with ``filename``, the size of the swap file with ``size``, and the maximum
+size of the swap file when using ``size: auto`` with ``maxsize``. By default
+no swap file is created.
+
+**Internal name:** ``cc_mounts``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ mounts:
+ - [ /dev/ephemeral0, /mnt, auto, "defaults,noexec" ]
+ - [ sdc, /opt/data ]
+ - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ]
+ mount_default_fields: [None, None, "auto", "defaults,nobootwait", "0", "2"]
+ swap:
+ filename: <file>
+ size: <"auto"/size in bytes>
+ maxsize: <size in bytes>
+"""
+
from string import whitespace
import logging
@@ -265,7 +313,7 @@ def handle(_name, cfg, cloud, log, _args):
# fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
def_mnt_opts = "defaults,nobootwait"
if cloud.distro.uses_systemd():
- def_mnt_opts = "defaults,nofail"
+ def_mnt_opts = "defaults,nofail,x-systemd.requires=cloud-init.service"
defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
defvals = cfg.get("mount_default_fields", defvals)
@@ -401,5 +449,5 @@ def handle(_name, cfg, cloud, log, _args):
try:
util.subp(("mount", "-a"))
- except Exception:
+ except util.ProcessExecutionError:
util.logexc(log, "Activating mounts via 'mount -a' failed")
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index ad69aa34..7cda3172 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -16,6 +16,38 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+NTP
+---
+**Summary:** enable and configure ntp
+
+Handle ntp configuration. If ntp is not installed on the system and ntp
+configuration is specified, ntp will be installed. If there is a default ntp
+config file in the image or one is present in the distro's ntp package, it will
+be copied to ``/etc/ntp.conf.dist`` before any changes are made. A list of ntp
+pools and ntp servers can be provided under the ``ntp`` config key. If no ntp
+servers or pools are provided, 4 pools will be used in the format
+``{0-3}.{distro}.pool.ntp.org``.
+
+**Internal name:** ``cc_ntp``
+
+**Module frequency:** per instance
+
+**Supported distros:** centos, debian, fedora, opensuse, ubuntu
+
+**Config keys**::
+
+ ntp:
+ pools:
+ - 0.company.pool.ntp.org
+ - 1.company.pool.ntp.org
+ - ntp.myorg.org
+ servers:
+ - my.ntp.server.local
+ - ntp.ubuntu.com
+ - 192.168.23.2
+"""
+
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
from cloudinit import templater
diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py
index 73b0e30d..6d717616 100644
--- a/cloudinit/config/cc_package_update_upgrade_install.py
+++ b/cloudinit/config/cc_package_update_upgrade_install.py
@@ -16,6 +16,41 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Package Update Upgrade Install
+------------------------------
+**Summary:** update, upgrade, and install packages
+
+This module allows packages to be updated, upgraded or installed during boot.
+If any packages are to be installed or an upgrade is to be performed then the
+package cache will be updated first. If a package installation or upgrade
+requires a reboot, then a reboot can be performed if
+``package_reboot_if_required`` is specified. A list of packages to install can
+be provided. Each entry in the list can be either a package name or a list with
+two entries, the first being the package name and the second being the specific
+package version to install.
+
+**Internal name:** ``cc_package_update_upgrade_install``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ packages:
+ - pwgen
+ - pastebinit
+ - [libpython2.7, 2.7.3-0ubuntu3.1]
+ package_update: <true/false>
+ package_upgrade: <true/false>
+ package_reboot_if_required: <true/false>
+
+ apt_update: (alias for package_update)
+ apt_upgrade: (alias for package_upgrade)
+ apt_reboot_if_required: (alias for package_reboot_if_required)
+"""
+
import os
import time
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index ae720bd2..cb70d39c 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -18,6 +18,40 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Phone Home
+----------
+**Summary:** post data to url
+
+This module can be used to post data to a remote host after boot is complete.
+If the post url contains the string ``$INSTANCE_ID`` it will be replaced with
+the id of the current instance. Either all data can be posted or a list of
+keys to post. Available keys are:
+
+ - ``pub_key_dsa``
+ - ``pub_key_rsa``
+ - ``pub_key_ecdsa``
+ - ``instance_id``
+ - ``hostname``
+ - ``fqdn``
+
+**Internal name:** ``cc_phone_home``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ phone_home:
+ url: http://example.com/$INSTANCE_ID/
+ post:
+ - pub_key_dsa
+ - instance_id
+ - fqdn
+ tries: 10
+"""
+
from cloudinit import templater
from cloudinit import util
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index cc3f7f70..61b5416a 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -16,6 +16,51 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Power State Change
+------------------
+**Summary:** change power state
+
+This module handles shutdown/reboot after all config modules have been run. By
+default it will take no action, and the system will keep running unless a
+package installation/upgrade requires a system reboot (e.g. installing a new
+kernel) and ``package_reboot_if_required`` is true. The ``power_state`` config
+key accepts a dict of options. If ``mode`` is any value other than
+``poweroff``, ``halt``, or ``reboot``, then no action will be taken.
+
+The system
+can be shut down before cloud-init has finished using the ``timeout`` option.
+The ``delay`` key specifies a duration to be added onto any shutdown command
+used. Therefore, if a 5 minute delay and a 120 second shutdown are specified,
+the maximum amount of time between cloud-init starting and the system shutting
+down is 7 minutes, and the minimum amount of time is 5 minutes. The ``delay``
+key must have an argument in a form that the ``shutdown`` utility recognizes.
+The most common format is the form ``+5`` for 5 minutes. See ``man shutdown``
+for more options.
+
+Optionally, a command can be run to determine whether or not
+the system should shut down. The command to be run should be specified in the
+``condition`` key. For command formatting, see the documentation for
+``cc_runcmd``. The specified shutdown behavior will only take place if the
+``condition`` key is omitted or the command specified by the ``condition``
+key returns 0.
+
+**Internal name:** ``cc_power_state_change``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ power_state:
+ delay: <now/'+minutes'>
+ mode: <poweroff/halt/reboot>
+ message: <shutdown message>
+ timeout: <seconds>
+ condition: <true/false/command>
+"""
+
from cloudinit.settings import PER_INSTANCE
from cloudinit import util
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 774d3322..bfd630d2 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -18,6 +18,51 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Puppet
+------
+**Summary:** install, configure and start puppet
+
+This module handles puppet installation and configuration. If the ``puppet``
+key does not exist in global configuration, no action will be taken. If a
+config entry for ``puppet`` is present, then by default the latest version of
+puppet will be installed. If ``install`` is set to ``false``, puppet will not
+be installed. However, this may result in an error if puppet is not already
+present on the system. The version of puppet to be installed can be specified
+under ``version``, and defaults to ``none``, which selects the latest version
+in the repos. If the ``puppet`` config key exists in the config archive, this
+module will attempt to start puppet even if no installation was performed.
+
+Puppet configuration can be specified under the ``conf`` key. The configuration
+is specified as a dictionary which is converted into ``<key>=<value>`` format
+and appended to ``puppet.conf`` under the ``[puppetd]`` section. The
+``certname`` key supports string substitutions for ``%i`` and ``%f``,
+corresponding to the instance id and fqdn of the machine respectively.
+If ``ca_cert`` is present under ``conf``, it will not be written to
+``puppet.conf``, but instead will be used as the puppetmaster certificate.
+It should be specified in pem format as a multi-line string (using the ``|``
+yaml notation).
+
+**Internal name:** ``cc_puppet``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ puppet:
+ install: <true/false>
+ version: <version>
+ conf:
+ server: "puppetmaster.example.org"
+ certname: "%i.%f"
+ ca_cert: |
+ -------BEGIN CERTIFICATE-------
+ <cert data>
+ -------END CERTIFICATE-------
+"""
+
from six import StringIO
import os
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 2a2a9f59..1b917966 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -18,6 +18,32 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Resizefs
+--------
+**Summary:** resize filesystem
+
+Resize a filesystem to use all available space on partition. This module is
+useful along with ``cc_growpart`` and will ensure that if the root partition
+has been resized the root filesystem will be resized along with it. By default,
+``cc_resizefs`` will resize the root partition and will block the boot process
+while the resize command is running. Optionally, the resize operation can be
+performed in the background while cloud-init continues running modules. This
+can be enabled by setting ``resize_rootfs`` to ``noblock``. This module can be
+disabled altogether by setting ``resize_rootfs`` to ``false``.
+
+**Internal name:** ``cc_resizefs``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
+
+ resize_rootfs: <true/false/"noblock">
+ resize_rootfs_tmp: <directory>
+"""
+
import errno
import os
import stat
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 71d9e3a7..feea5653 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -18,36 +18,45 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-# Note:
-# This module is intended to manage resolv.conf in environments where
-# early configuration of resolv.conf is necessary for further
-# bootstrapping and/or where configuration management such as puppet or
-# chef own dns configuration. As Debian/Ubuntu will, by default, utilize
-# resovlconf, and similarly RedHat will use sysconfig, this module is
-# likely to be of little use unless those are configured correctly.
-#
-# For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
-# enabled NICs. And, in Ubuntu/Debian it is recommended that DNS
-# be configured via the standard /etc/network/interfaces configuration
-# file.
-#
-#
-# Usage Example:
-#
-# #cloud-config
-# manage_resolv_conf: true
-#
-# resolv_conf:
-# nameservers: ['8.8.4.4', '8.8.8.8']
-# searchdomains:
-# - foo.example.com
-# - bar.example.com
-# domain: example.com
-# options:
-# rotate: true
-# timeout: 1
-#
-
+"""
+Resolv Conf
+-----------
+**Summary:** configure resolv.conf
+
+This module is intended to manage resolv.conf in environments where early
+configuration of resolv.conf is necessary for further bootstrapping and/or
+where configuration management such as puppet or chef own dns configuration.
+As Debian/Ubuntu will, by default, utilize resolvconf, and similarly RedHat
+will use sysconfig, this module is likely to be of little use unless those
+are configured correctly.
+
+.. note::
+ For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP
+ enabled NICs.
+
+.. note::
+ And, in Ubuntu/Debian it is recommended that DNS be configured via the
+ standard /etc/network/interfaces configuration file.
+
+**Internal name:** ``cc_resolv_conf``
+
+**Module frequency:** per instance
+
+**Supported distros:** fedora, rhel, sles
+
+**Config keys**::
+
+ manage_resolv_conf: <true/false>
+ resolv_conf:
+ nameservers: ['8.8.4.4', '8.8.8.8']
+ searchdomains:
+ - foo.example.com
+ - bar.example.com
+ domain: example.com
+ options:
+ rotate: <true/false>
+ timeout: 1
+"""
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index d4ad724a..d858f65c 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -16,6 +16,40 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+RedHat Subscription
+-------------------
+**Summary:** register red hat enterprise linux based system
+
+Register a RedHat system either by username and password *or* activation and
+org. Following a successful registration, you can auto-attach subscriptions,
+set
+the service level, add subscriptions based on pool id, enable/disable yum
+repositories based on repo id, and alter the rhsm_baseurl and server-hostname
+in ``/etc/rhsm/rhsm.conf``. For more details, see the ``Register RedHat
+Subscription`` example config.
+
+**Internal name:** ``cc_rh_subscription``
+
+**Module frequency:** per instance
+
+**Supported distros:** rhel, fedora
+
+**Config keys**::
+
+ rh_subscription:
+ username: <username>
+ password: <password>
+ activation-key: <activation key>
+ org: <org number>
+ auto-attach: <true/false>
+ service-level: <service level>
+ add-pool: <list of pool ids>
+ enable-repo: <list of yum repo ids>
+ disable-repo: <list of yum repo ids>
+ rhsm-baseurl: <url>
+ server-hostname: <hostname>
+"""
+
from cloudinit import util
distros = ['fedora', 'rhel']
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 8118fac4..6cf8c948 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -18,6 +18,32 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Rightscale Userdata
+-------------------
+**Summary:** support rightscale configuration hooks
+
+This module adds support for RightScale configuration hooks to cloud-init.
+RightScale adds an entry in the format ``CLOUD_INIT_REMOTE_HOOK=http://...`` to
+ec2 user-data. This module checks for this line in the raw userdata and
+retrieves any scripts linked by the RightScale user data and places them in the
+user scripts configuration directory, to be run later by ``cc_scripts_user``.
+
+.. note::
+ the ``CLOUD_INIT_REMOTE_HOOK`` config variable is present in the raw ec2
+ user data only, not in any cloud-config parts
+
+**Internal name:** ``cc_rightscale_userdata``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ CLOUD_INIT_REMOTE_HOOK=<url>
+"""
+
#
# The purpose of this script is to allow cloud-init to consume
# rightscale style userdata. rightscale user data is key-value pairs
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index b8642d65..1c12e567 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -18,90 +18,177 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
-rsyslog module allows configuration of syslog logging via rsyslog
-Configuration is done under the cloud-config top level 'rsyslog'.
-
-Under 'rsyslog' you can define:
- - configs: [default=[]]
- this is a list. entries in it are a string or a dictionary.
- each entry has 2 parts:
- * content
- * filename
- if the entry is a string, then it is assigned to 'content'.
- for each entry, content is written to the provided filename.
- if filename is not provided, its default is read from 'config_filename'
-
- Content here can be any valid rsyslog configuration. No format
- specific format is enforced.
-
- For simply logging to an existing remote syslog server, via udp:
- configs: ["*.* @192.168.1.1"]
-
- - remotes: [default={}]
- This is a dictionary of name / value pairs.
- In comparison to 'config's, it is more focused in that it only supports
- remote syslog configuration. It is not rsyslog specific, and could
- convert to other syslog implementations.
-
- Each entry in remotes is a 'name' and a 'value'.
- * name: an string identifying the entry. good practice would indicate
- using a consistent and identifiable string for the producer.
- For example, the MAAS service could use 'maas' as the key.
- * value consists of the following parts:
- * optional filter for log messages
- default if not present: *.*
- * optional leading '@' or '@@' (indicates udp or tcp respectively).
- default if not present (udp): @
- This is rsyslog format for that. if not present, is '@'.
- * ipv4 or ipv6 or hostname
- ipv6 addresses must be in [::1] format. (@[fd00::1]:514)
- * optional port
- port defaults to 514
-
- - config_filename: [default=20-cloud-config.conf]
- this is the file name to use if none is provided in a config entry.
-
- - config_dir: [default=/etc/rsyslog.d]
- this directory is used for filenames that are not absolute paths.
-
- - service_reload_command: [default="auto"]
- this command is executed if files have been written and thus the syslog
- daemon needs to be told.
-
-Note, since cloud-init 0.5 a legacy version of rsyslog config has been
-present and is still supported. See below for the mappings between old
-value and new value:
- old value -> new value
- 'rsyslog' -> rsyslog/configs
- 'rsyslog_filename' -> rsyslog/config_filename
- 'rsyslog_dir' -> rsyslog/config_dir
-
-the legacy config does not support 'service_reload_command'.
-
-Example config:
- #cloud-config
- rsyslog:
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- remotes:
- maas: "192.168.1.1"
- juju: "10.0.4.1"
- config_dir: config_dir
- config_filename: config_filename
- service_reload_command: [your, syslog, restart, command]
-
-Example Legacy config:
- #cloud-config
- rsyslog:
- - "*.* @@192.158.1.1"
- rsyslog_dir: /etc/rsyslog-config.d/
- rsyslog_filename: 99-local.conf
+.. _cc_rsyslog:
+
+Rsyslog
+-------
+**Summary:** configure system logging via rsyslog
+
+This module configures remote system logging using rsyslog.
+
+The rsyslog config file to write to can be specified in ``config_filename``,
+which defaults to ``20-cloud-config.conf``. The rsyslog config directory to
+write config files to may be specified in ``config_dir``, which defaults to
+``/etc/rsyslog.d``.
+
+A list of configurations for rsyslog can be specified under the ``configs``
+key in the ``rsyslog`` config. Each entry in ``configs`` is either a string or
+a dictionary. Each config entry contains a configuration string and a file to
+write it to. For config entries that are a dictionary, ``filename`` sets the
+target filename and ``content`` specifies the config string to write. For
+config entries that are only a string, the string is used as the config string
+to write. If the filename to write the config to is not specified, the value of
+the ``config_filename`` key is used. A file with the selected filename will
+be written inside the directory specified by ``config_dir``.
+
+The command to use to reload the rsyslog service after the config has been
+updated can be specified in ``service_reload_command``. If this is set to
+``auto``, then an appropriate command for the distro will be used. This is the
+default behavior. To manually set the command, use a list of command args (e.g.
+``[systemctl, restart, rsyslog]``).
+
+Configuration for remote servers can be specified in ``configs``, but for
+convenience it can be specified as key value pairs in ``remotes``. Each key
+is the name for an rsyslog remote entry. Each value holds the contents of the
+remote config for rsyslog. The config consists of the following parts:
+
+ - filter for log messages (defaults to ``*.*``)
+ - optional leading ``@`` or ``@@``, indicating udp and tcp respectively
+ (defaults to ``@``, for udp)
+ - ipv4 or ipv6 hostname or address. ipv6 addresses must be in ``[::1]``
+ format, (e.g. ``@[fd00::1]:514``)
+ - optional port number (defaults to ``514``)
+
+This module will provide sane defaults for any part of the remote entry that is
+not specified, so in most cases remote hosts can be specified just using
+``<name>: <address>``.
+
+For backwards compatibility, this module still supports legacy names for the
+config entries. Legacy to new mappings are as follows:
+
+ - ``rsyslog`` -> ``rsyslog/configs``
+ - ``rsyslog_filename`` -> ``rsyslog/config_filename``
+ - ``rsyslog_dir`` -> ``rsyslog/config_dir``
+
+.. note::
+ The legacy config format does not support specifying
+ ``service_reload_command``.
+
+**Internal name:** ``cc_rsyslog``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ rsyslog:
+ config_dir: config_dir
+ config_filename: config_filename
+ configs:
+ - "*.* @@192.158.1.1"
+ - content: "*.* @@192.0.2.1:10514"
+ filename: 01-example.conf
+ - content: |
+ *.* @@syslogd.example.com
+ remotes:
+ maas: "192.168.1.1"
+ juju: "10.0.4.1"
+ service_reload_command: [your, syslog, restart, command]
+
+**Legacy config keys**::
+
+ rsyslog:
+ - "*.* @@192.158.1.1"
+ rsyslog_dir: /etc/rsyslog-config.d/
+ rsyslog_filename: 99-local.conf
"""
+# Old rsyslog documentation, kept for reference:
+#
+# rsyslog module allows configuration of syslog logging via rsyslog
+# Configuration is done under the cloud-config top level 'rsyslog'.
+#
+# Under 'rsyslog' you can define:
+# - configs: [default=[]]
+# this is a list. entries in it are a string or a dictionary.
+# each entry has 2 parts:
+# * content
+# * filename
+# if the entry is a string, then it is assigned to 'content'.
+# for each entry, content is written to the provided filename.
+# if filename is not provided, its default is read from 'config_filename'
+#
+# Content here can be any valid rsyslog configuration. No format
+# specific format is enforced.
+#
+# For simply logging to an existing remote syslog server, via udp:
+# configs: ["*.* @192.168.1.1"]
+#
+# - remotes: [default={}]
+# This is a dictionary of name / value pairs.
+# In comparison to 'config's, it is more focused in that it only supports
+# remote syslog configuration. It is not rsyslog specific, and could
+# convert to other syslog implementations.
+#
+# Each entry in remotes is a 'name' and a 'value'.
+# * name: an string identifying the entry. good practice would indicate
+# using a consistent and identifiable string for the producer.
+# For example, the MAAS service could use 'maas' as the key.
+# * value consists of the following parts:
+# * optional filter for log messages
+# default if not present: *.*
+# * optional leading '@' or '@@' (indicates udp or tcp respectively).
+# default if not present (udp): @
+# This is rsyslog format for that. if not present, is '@'.
+# * ipv4 or ipv6 or hostname
+# ipv6 addresses must be in [::1] format. (@[fd00::1]:514)
+# * optional port
+# port defaults to 514
+#
+# - config_filename: [default=20-cloud-config.conf]
+# this is the file name to use if none is provided in a config entry.
+#
+# - config_dir: [default=/etc/rsyslog.d]
+# this directory is used for filenames that are not absolute paths.
+#
+# - service_reload_command: [default="auto"]
+# this command is executed if files have been written and thus the syslog
+# daemon needs to be told.
+#
+# Note, since cloud-init 0.5 a legacy version of rsyslog config has been
+# present and is still supported. See below for the mappings between old
+# value and new value:
+# old value -> new value
+# 'rsyslog' -> rsyslog/configs
+# 'rsyslog_filename' -> rsyslog/config_filename
+# 'rsyslog_dir' -> rsyslog/config_dir
+#
+# the legacy config does not support 'service_reload_command'.
+#
+# Example config:
+# #cloud-config
+# rsyslog:
+# configs:
+# - "*.* @@192.158.1.1"
+# - content: "*.* @@192.0.2.1:10514"
+# filename: 01-example.conf
+# - content: |
+# *.* @@syslogd.example.com
+# remotes:
+# maas: "192.168.1.1"
+# juju: "10.0.4.1"
+# config_dir: config_dir
+# config_filename: config_filename
+# service_reload_command: [your, syslog, restart, command]
+#
+# Example Legacy config:
+# #cloud-config
+# rsyslog:
+# - "*.* @@192.158.1.1"
+# rsyslog_dir: /etc/rsyslog-config.d/
+# rsyslog_filename: 99-local.conf
+
import os
import re
import six
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index bc09d38c..23e1e898 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -18,6 +18,38 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Runcmd
+------
+**Summary:** run commands
+
+Run arbitrary commands at a rc.local like level with output to the console.
+Each item can be either a list or a string. If the item is a list, it will be
+properly executed as if passed to ``execve()`` (with the first arg as the
+command). If the item is a string, it will be written to a file and interpreted
+using ``sh``.
+
+.. note::
+ all commands must be proper yaml, so you have to quote any characters yaml
+ would eat (':' can be problematic)
+
+**Internal name:** ``cc_runcmd``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ runcmd:
+ - [ ls, -l, / ]
+ - [ sh, -xc, "echo $(date) ': hello world!'" ]
+ - [ sh, -c, echo "=========hello world'=========" ]
+ - ls -l /root
+ - [ wget, "http://example.org", -O, /tmp/index.html ]
+"""
+
+
import os
from cloudinit import util
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 13d70c8e..90786658 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -14,6 +14,39 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Salt Minion
+-----------
+**Summary:** set up and run salt minion
+
+This module installs, configures and starts salt minion. If the ``salt_minion``
+key is present in the config parts, then salt minion will be installed and
+started. Configuration for salt minion can be specified in the ``conf`` key
+under ``salt_minion``. Any conf values present there will be assigned in
+``/etc/salt/minion``. The public and private keys to use for salt minion can be
+specified with ``public_key`` and ``private_key`` respectively.
+
+**Internal name:** ``cc_salt_minion``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ salt_minion:
+ conf:
+ master: salt.example.com
+ public_key: |
+ ------BEGIN PUBLIC KEY-------
+ <key data>
+ ------END PUBLIC KEY-------
+ private_key: |
+ ------BEGIN PRIVATE KEY------
+ <key data>
+ ------END PRIVATE KEY-------
+"""
+
import os
from cloudinit import util
diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index ee3b6c9f..0736cf7e 100644
--- a/cloudinit/config/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -18,6 +18,22 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Scripts Per Boot
+----------------
+**Summary:** run per boot scripts
+
+Any scripts in the ``scripts/per-boot`` directory on the datasource will be run
+every time the system boots. Scripts will be run in alphabetical order. This
+module does not accept any config keys.
+
+**Internal name:** ``cc_scripts_per_boot``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+"""
+
import os
from cloudinit import util
diff --git a/cloudinit/config/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index c0d62b12..c71d154b 100644
--- a/cloudinit/config/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -18,6 +18,22 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Scripts Per Instance
+--------------------
+**Summary:** run per instance scripts
+
+Any scripts in the ``scripts/per-instance`` directory on the datasource will
+be run when a new instance is first booted. Scripts will be run in alphabetical
+order. This module does not accept any config keys.
+
+**Internal name:** ``cc_scripts_per_instance``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+"""
+
import os
from cloudinit import util
diff --git a/cloudinit/config/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index ecb527f6..bf637eea 100644
--- a/cloudinit/config/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -18,6 +18,22 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Scripts Per Once
+----------------
+**Summary:** run one time scripts
+
+Any scripts in the ``scripts/per-once`` directory on the datasource will be run
+only once. Scripts will be run in alphabetical order. This module does not
+accept any config keys.
+
+**Internal name:** ``cc_scripts_per_once``
+
+**Module frequency:** per once
+
+**Supported distros:** all
+"""
+
import os
from cloudinit import util
diff --git a/cloudinit/config/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index 699857d1..54338a43 100644
--- a/cloudinit/config/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -18,6 +18,25 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Scripts User
+------------
+**Summary:** run user scripts
+
+This module runs all user scripts. User scripts are not specified in the
+``scripts`` directory in the datasource, but rather are present in the
+``scripts`` dir in the instance configuration. Any cloud-config parts with a
+``#!`` will be treated as a script and run. Scripts specified as cloud-config
+parts will be run in the order they are specified in the configuration.
+This module does not accept any config keys.
+
+**Internal name:** ``cc_scripts_user``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+"""
+
import os
from cloudinit import util
diff --git a/cloudinit/config/cc_scripts_vendor.py b/cloudinit/config/cc_scripts_vendor.py
index 80bf10ff..b5777df7 100644
--- a/cloudinit/config/cc_scripts_vendor.py
+++ b/cloudinit/config/cc_scripts_vendor.py
@@ -16,6 +16,28 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Scripts Vendor
+--------------
+**Summary:** run vendor scripts
+
+Any scripts in the ``scripts/vendor`` directory in the datasource will be run
+when a new instance is first booted. Scripts will be run in alphabetical order.
+Vendor scripts can be run with an optional prefix specified in the ``prefix``
+entry under the ``vendor_data`` config key.
+
+**Internal name:** ``cc_scripts_vendor``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ vendor_data:
+ prefix: <vendor data prefix>
+"""
+
import os
from cloudinit import util
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 5085c23a..d84255ed 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -19,6 +19,58 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Seed Random
+-----------
+**Summary:** provide random seed data
+
+All cloud instances started from the same image will produce very similar data
+when they are first booted, as they are all starting with the same seed for the
+kernel's entropy keyring. To avoid this, random seed data can be
+provided to the instance either as a string or by specifying a command to run
+to generate the data.
+
+Configuration for this module is under the ``random_seed`` config key. The
+``file`` key specifies the path to write the data to, defaulting to
+``/dev/urandom``. Data can be passed in directly with ``data``, and may
+optionally be specified in encoded form, with the encoding specified in
+``encoding``.
+
+.. note::
+ when using a multiline value for ``data`` or specifying binary data, be
+ sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format
+ specifiers when appropriate
+
+Instead of specifying a data string, a command can be run to generate/collect
+the data to be written. The command should be specified as a list of args in
+the ``command`` key. If a command is specified that cannot be run, no error
+will be reported unless ``command_required`` is set to true.
+
+For example, to use ``pollinate`` to gather data from a
+remote entropy server and write it to ``/dev/urandom``, the following could be
+used::
+
+ random_seed:
+ file: /dev/urandom
+        command: ["pollinate", "--server=http://local.pollinate.server"]
+ command_required: true
+
+**Internal name:** ``cc_seed_random``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ random_seed:
+ file: <file>
+ data: <random string>
+ encoding: <raw/base64/b64/gzip/gz>
+ command: [<cmd name>, <arg1>, <arg2>...]
+ command_required: <true/false>
+"""
+
import base64
import os
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index f43d8d5a..c35cefee 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -18,6 +18,32 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Set Hostname
+------------
+**Summary:** set hostname and fqdn
+
+This module handles setting the system hostname and fqdn. If
+``preserve_hostname`` is set, then the hostname will not be altered.
+
+A hostname and fqdn can be provided by specifying a full domain name under the
+``fqdn`` key. Alternatively, a hostname can be specified using the ``hostname``
+key, and the fqdn of the cloud will be used. If a fqdn is specified with the
+``hostname`` key, it will be handled properly, although it is better to use
+the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn``
+will be used.
+
+**Internal name:** ``cc_set_hostname``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+    preserve_hostname: <true/false>
+ fqdn: <fqdn>
+ hostname: <fqdn/hostname>
+"""
+
from cloudinit import util
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 5c8c23b8..6fc00517 100644..100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -18,13 +18,55 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import sys
+"""
+Set Passwords
+-------------
+**Summary:** Set user passwords
+
+Set system passwords and enable or disable ssh password authentication.
+The ``chpasswd`` config key accepts a dictionary containing a single one of two
+keys, either ``expire`` or ``list``. If ``expire`` is specified and is set to
+``false``, then the ``password`` global config key is used as the password for
+all user accounts. If the ``expire`` key is specified and is set to ``true``
+then user passwords will be expired, preventing the default system passwords
+from being used.
+
+If the ``list`` key is provided, a list of
+``username:password`` pairs can be specified. The usernames specified
+must already exist on the system, or have been created using the
+``cc_users_groups`` module. A password can be randomly generated using
+``username:RANDOM`` or ``username:R``. Password ssh authentication can be
+enabled, disabled, or left to system defaults using ``ssh_pwauth``.
+
+.. note::
+ if using ``expire: true`` then a ssh authkey should be specified or it may
+ not be possible to login to the system
+
+**Internal name:** ``cc_set_passwords``
+
+**Module frequency:** per instance
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
+**Supported distros:** all
+
+**Config keys**::
+
+ ssh_pwauth: <yes/no/unchanged>
+
+ password: password1
+ chpasswd:
+ expire: <true/false>
+
+ chpasswd:
+ list:
+ - user1:password1
+ - user2:Random
+ - user3:password3
+ - user4:R
+"""
+
+import sys
+from cloudinit.distros import ug_util
from cloudinit import ssh_util
from cloudinit import util
@@ -53,8 +95,8 @@ def handle(_name, cfg, cloud, log, args):
expire = util.get_cfg_option_bool(chfg, 'expire', expire)
if not plist and password:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
+ (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
+ (user, _user_config) = ug_util.extract_default(users)
if user:
plist = "%s:%s" % (user, password)
else:
diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py
new file mode 100644
index 00000000..275a2d09
--- /dev/null
+++ b/cloudinit/config/cc_snap_config.py
@@ -0,0 +1,184 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Snappy
+------
+**Summary:** snap_config module allows configuration of snapd.
+
+This module uses the same ``snappy`` namespace for configuration but
+acts on only a subset of the configuration.
+
+If ``assertions`` is set and the user has included a list of assertions
+then cloud-init will collect the assertions into a single assertion file
+and invoke ``snap ack <path to file with assertions>`` which will attempt
+to load the provided assertions into the snapd assertion database.
+
+If ``email`` is set, this value is used to create an authorized user for
+contacting and installing snaps from the Ubuntu Store. This is done by
+calling ``snap create-user`` command.
+
+If ``known`` is set to True, then it is expected the user also included
+an assertion of type ``system-user``. When ``snap create-user`` is called
+cloud-init will append '--known' flag which instructs snapd to look for
+a system-user assertion with the details. If ``known`` is not set, then
+``snap create-user`` will contact the Ubuntu SSO for validating and importing
+a system-user for the instance.
+
+.. note::
+ If the system is already managed, then cloud-init will not attempt to
+ create a system-user.
+
+**Internal name:** ``cc_snap_config``
+
+**Module frequency:** per instance
+
+**Supported distros:** any with 'snapd' available
+
+**Config keys**::
+
+ #cloud-config
+ snappy:
+ assertions:
+ - |
+ <assertion 1>
+ - |
+ <assertion 2>
+ email: user@user.org
+ known: true
+
+"""
+
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+SNAPPY_CMD = "snap"
+ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions"
+
+
+"""
+snappy:
+ assertions:
+ - |
+ <snap assertion 1>
+ - |
+ <snap assertion 2>
+ email: foo@foo.io
+ known: true
+"""
+
+
+def add_assertions(assertions=None):
+ """Import list of assertions.
+
+ Import assertions by concatenating each assertion into a
+ string separated by a '\n'. Write this string to an instance file and
+ then invoke `snap ack /path/to/file` and check for errors.
+ If snap exits 0, then all assertions are imported.
+ """
+ if not assertions:
+ assertions = []
+
+ if not isinstance(assertions, list):
+ raise ValueError('assertion parameter was not a list: %s', assertions)
+
+ snap_cmd = [SNAPPY_CMD, 'ack']
+ combined = "\n".join(assertions)
+ if len(combined) == 0:
+ raise ValueError("Assertion list is empty")
+
+ for asrt in assertions:
+ LOG.debug('Acking: %s', asrt.split('\n')[0:2])
+
+ util.write_file(ASSERTIONS_FILE, combined.encode('utf-8'))
+ util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True)
+
+
+def add_snap_user(cfg=None):
+ """Add a snap system-user if provided with email under snappy config.
+
+ - Check that system is not already managed.
+ - Check that if using a system-user assertion, that it's
+ imported into snapd.
+
+ Returns a dictionary to be passed to Distro.create_user
+ """
+
+ if not cfg:
+ cfg = {}
+
+ if not isinstance(cfg, dict):
+ raise ValueError('configuration parameter was not a dict: %s', cfg)
+
+ snapuser = cfg.get('email', None)
+ if not snapuser:
+ return
+
+ usercfg = {
+ 'snapuser': snapuser,
+ 'known': cfg.get('known', False),
+ }
+
+ # query if we're already registered
+ out, _ = util.subp([SNAPPY_CMD, 'managed'], capture=True)
+ if out.strip() == "true":
+ LOG.warning('This device is already managed. '
+ 'Skipping system-user creation')
+ return
+
+ if usercfg.get('known'):
+ # Check that we imported a system-user assertion
+ out, _ = util.subp([SNAPPY_CMD, 'known', 'system-user'],
+ capture=True)
+ if len(out) == 0:
+ LOG.error('Missing "system-user" assertion. '
+ 'Check "snappy" user-data assertions.')
+ return
+
+ return usercfg
+
+
+def handle(name, cfg, cloud, log, args):
+ cfgin = cfg.get('snappy')
+ if not cfgin:
+ LOG.debug('No snappy config provided, skipping')
+ return
+
+ if not(util.system_is_snappy()):
+ LOG.debug("%s: system not snappy", name)
+ return
+
+ assertions = cfgin.get('assertions', [])
+ if len(assertions) > 0:
+ LOG.debug('Importing user-provided snap assertions')
+ add_assertions(assertions)
+
+ # Create a snap user if requested.
+ # Snap systems contact the store with a user's email
+ # and extract information needed to create a local user.
+ # A user may provide a 'system-user' assertion which includes
+ # the required information. Using such an assertion to create
+ # a local user requires specifying 'known: true' in the supplied
+ # user-data.
+ usercfg = add_snap_user(cfg=cfgin)
+ if usercfg:
+ cloud.distro.create_user(usercfg.get('snapuser'), **usercfg)
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 6bcd8382..e03ec483 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -1,49 +1,76 @@
# vi: ts=4 expandtab
#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
"""
-snappy modules allows configuration of snappy.
-Example config:
- #cloud-config
- snappy:
- system_snappy: auto
- ssh_enabled: auto
- packages: [etcd, pkg2.smoser]
- config:
- pkgname:
- key2: value2
- pkg2:
- key1: value1
- packages_dir: '/writable/user-data/cloud-init/snaps'
-
- - ssh_enabled:
- This controls the system's ssh service. The default value is 'auto'.
- True: enable ssh service
- False: disable ssh service
- auto: enable ssh service if either ssh keys have been provided
- or user has requested password authentication (ssh_pwauth).
-
- - snap installation and config
- The above would install 'etcd', and then install 'pkg2.smoser' with a
- '<config-file>' argument where 'config-file' has 'config-blob' inside it.
- If 'pkgname' is installed already, then 'snappy config pkgname <file>'
- will be called where 'file' has 'pkgname-config-blob' as its content.
-
- Entries in 'config' can be namespaced or non-namespaced for a package.
- In either case, the config provided to snappy command is non-namespaced.
- The package name is provided as it appears.
-
- If 'packages_dir' has files in it that end in '.snap', then they are
- installed. Given 3 files:
- <packages_dir>/foo.snap
- <packages_dir>/foo.config
- <packages_dir>/bar.snap
- cloud-init will invoke:
- snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
- snappy install <packages_dir>/bar.snap
-
- Note, that if provided a 'config' entry for 'ubuntu-core', then
- cloud-init will invoke: snappy config ubuntu-core <config>
- Allowing you to configure ubuntu-core in this way.
+Snappy
+------
+**Summary:** snappy module allows configuration of snappy.
+
+The below example config would install ``etcd``, and then install
+``pkg2.smoser`` with a ``<config-file>`` argument where ``config-file`` has
+``config-blob`` inside it. If ``pkgname`` is installed already, then
+``snappy config pkgname <file>``
+will be called where ``file`` has ``pkgname-config-blob`` as its content.
+
+Entries in ``config`` can be namespaced or non-namespaced for a package.
+In either case, the config provided to snappy command is non-namespaced.
+The package name is provided as it appears.
+
+If ``packages_dir`` has files in it that end in ``.snap``, then they are
+installed. Given 3 files:
+
+ - <packages_dir>/foo.snap
+ - <packages_dir>/foo.config
+ - <packages_dir>/bar.snap
+
+cloud-init will invoke:
+
+ - snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
+ - snappy install <packages_dir>/bar.snap
+
+.. note::
+ if provided a ``config`` entry for ``ubuntu-core``, then
+ cloud-init will invoke: snappy config ubuntu-core <config>
+ Allowing you to configure ubuntu-core in this way.
+
+The ``ssh_enabled`` key controls the system's ssh service. The default value
+is ``auto``. Options are:
+
+ - **True:** enable ssh service
+ - **False:** disable ssh service
+ - **auto:** enable ssh service if either ssh keys have been provided
+ or user has requested password authentication (ssh_pwauth).
+
+**Internal name:** ``cc_snappy``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu
+
+**Config keys**::
+
+ #cloud-config
+ snappy:
+ system_snappy: auto
+ ssh_enabled: auto
+ packages: [etcd, pkg2.smoser]
+ config:
+ pkgname:
+ key2: value2
+ pkg2:
+ key1: value1
+ packages_dir: '/writable/user-data/cloud-init/snaps'
"""
from cloudinit import log as logging
@@ -230,24 +257,14 @@ def disable_enable_ssh(enabled):
util.write_file(not_to_be_run, "cloud-init\n")
-def system_is_snappy():
- # channel.ini is configparser loadable.
- # snappy will move to using /etc/system-image/config.d/*.ini
- # this is certainly not a perfect test, but good enough for now.
- content = util.load_file("/etc/system-image/channel.ini", quiet=True)
- if 'ubuntu-core' in content.lower():
- return True
- if os.path.isdir("/etc/system-image/config.d/"):
- return True
- return False
-
-
def set_snappy_command():
global SNAPPY_CMD
if util.which("snappy-go"):
SNAPPY_CMD = "snappy-go"
- else:
+ elif util.which("snappy"):
SNAPPY_CMD = "snappy"
+ else:
+ SNAPPY_CMD = "snap"
LOG.debug("snappy command is '%s'", SNAPPY_CMD)
@@ -262,7 +279,7 @@ def handle(name, cfg, cloud, log, args):
LOG.debug("%s: System is not snappy. disabling", name)
return
- if sys_snappy.lower() == "auto" and not(system_is_snappy()):
+ if sys_snappy.lower() == "auto" and not(util.system_is_snappy()):
LOG.debug("%s: 'auto' mode, and system not snappy", name)
return
diff --git a/cloudinit/config/cc_spacewalk.py b/cloudinit/config/cc_spacewalk.py
index f3c1a664..99b63a84 100644
--- a/cloudinit/config/cc_spacewalk.py
+++ b/cloudinit/config/cc_spacewalk.py
@@ -13,15 +13,30 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
-**Summary:** helper to setup https://fedorahosted.org/spacewalk/
+Spacewalk
+---------
+**Summary:** install and configure spacewalk
-**Description:** This module will enable for configuring the needed
-actions to setup spacewalk on redhat based systems.
+This module installs spacewalk and applies basic configuration. If the
+``spacewalk`` config key is present spacewalk will be installed. The server to
+connect to after installation must be provided in the ``server`` key of the
+spacewalk configuration. A proxy to connect through and an activation key may optionally
+be specified.
-It can be configured with the following option structure::
+For more information about spacewalk see: https://fedorahosted.org/spacewalk/
+
+**Internal name:** ``cc_spacewalk``
+
+**Module frequency:** per instance
+
+**Supported distros:** redhat, fedora
+
+**Config keys**::
spacewalk:
- server: spacewalk api server (required)
+ server: <url>
+ proxy: <proxy host>
+ activation_key: <key>
"""
from cloudinit import util
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index cb9b70aa..576fa58a 100644..100755
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -18,15 +18,98 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+SSH
+---
+**Summary:** configure ssh and ssh keys
+
+This module handles most configuration for ssh and ssh keys. Many images have
+default ssh keys, which can be removed using ``ssh_deletekeys``. Since removing
+default keys is usually the desired behavior this option is enabled by default.
+
+Keys can be added using the ``ssh_keys`` configuration key. The argument to
+this config key should be a dictionary entries for the public and private keys
+of each desired key type. Entries in the ``ssh_keys`` config dict should
+have keys in the format ``<key type>_private`` and ``<key type>_public``, e.g.
+``rsa_private: <key>`` and ``rsa_public: <key>``. See below for supported key
+types. Not all key types have to be specified, ones left unspecified will not
+be used. If this config option is used, then no keys will be generated.
+
+.. note::
+ when specifying private keys in cloud-config, care should be taken to
+ ensure that the communication between the data source and the instance is
+ secure
+
+.. note::
+ to specify multiline private keys, use yaml multiline syntax
+
+If no keys are specified using ``ssh_keys``, then keys will be generated using
+``ssh-keygen``. By default one public/private pair of each supported key type
+will be generated. The key types to generate can be specified using the
+``ssh_genkeytypes`` config flag, which accepts a list of key types to use. For
+each key type for which this module has been instructed to create a keypair, if
+a key of the same type is already present on the system (i.e. if
+``ssh_deletekeys`` was false), no key will be generated.
+
+Supported key types for the ``ssh_keys`` and the ``ssh_genkeytypes`` config
+flags are:
+
+ - rsa
+ - dsa
+ - ecdsa
+ - ed25519
+
+Root login can be enabled/disabled using the ``disable_root`` config key. Root
+login options can be manually specified with ``disable_root_opts``. If
+``disable_root_opts`` is specified and contains the string ``$USER``,
+it will be replaced with the username of the default user. By default,
+root login is disabled, and root login opts are set to::
+
+ no-port-forwarding,no-agent-forwarding,no-X11-forwarding
+
+Authorized keys for the default user/first user defined in ``users`` can be
+specified using ``ssh_authorized_keys``. Keys should be specified as a list of
+public keys.
+
+.. note::
+ see the ``cc_set_passwords`` module documentation to enable/disable ssh
+ password authentication
+
+**Internal name:** ``cc_ssh``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ ssh_deletekeys: <true/false>
+ ssh_keys:
+ rsa_private: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
+ ...
+ -----END RSA PRIVATE KEY-----
+ rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+ dsa_private: |
+ -----BEGIN DSA PRIVATE KEY-----
+ MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco
+ ...
+ -----END DSA PRIVATE KEY-----
+ dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...
+ ssh_genkeytypes: <key type>
+ disable_root: <true/false>
+ disable_root_opts: <disable root options string>
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...
+ - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...
+"""
+
import glob
import os
import sys
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
+from cloudinit.distros import ug_util
from cloudinit import ssh_util
from cloudinit import util
@@ -110,8 +193,8 @@ def handle(_name, cfg, cloud, log, _args):
"file %s", keytype, keyfile)
try:
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
- (user, _user_config) = ds.extract_default(users)
+ (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
+ (user, _user_config) = ug_util.extract_default(users)
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
DISABLE_ROOT_OPTS)
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 6ce831bc..7eeb0f84 100644..100755
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -16,16 +16,33 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+SSH Authkey Fingerprints
+------------------------
+**Summary:** log fingerprints of user ssh keys
+
+Write fingerprints of authorized keys for each user to log. This is enabled by
+default, but can be disabled using ``no_ssh_fingerprints``. The hash type for
+the keys can be specified, but defaults to ``md5``.
+
+**Internal name:** ``cc_ssh_authkey_fingerprints``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ no_ssh_fingerprints: <true/false>
+ authkey_hash: <hash type>
+"""
+
import base64
import hashlib
from prettytable import PrettyTable
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
-
+from cloudinit.distros import ug_util
from cloudinit import ssh_util
from cloudinit import util
@@ -98,7 +115,7 @@ def handle(name, cfg, cloud, log, _args):
return
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (user_name, _cfg) in users.items():
(key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name)
_pprint_key_entries(user_name, key_fn,
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 28c4585b..1be96dc5 100644..100755
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -18,11 +18,31 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-# Ensure this is aliased to a name not 'distros'
-# since the module attribute 'distros'
-# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
+"""
+SSH Import Id
+-------------
+**Summary:** import ssh id
+This module imports ssh keys from either a public keyserver, usually launchpad
+or github using ``ssh-import-id``. Keys are referenced by the username they are
+associated with on the keyserver. The keyserver can be specified by prepending
+either ``lp:`` for launchpad or ``gh:`` for github to the username.
+
+**Internal name:** ``cc_ssh_import_id``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu, debian
+
+**Config keys**::
+
+ ssh_import_id:
+ - user
+ - gh:user
+ - lp:user
+"""
+
+from cloudinit.distros import ug_util
from cloudinit import util
import pwd
@@ -43,7 +63,7 @@ def handle(_name, cfg, cloud, log, args):
return
# import for cloudinit created users
- (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
elist = []
for (user, user_cfg) in users.items():
import_ids = []
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
index b9eb85b2..7024b07b 100644
--- a/cloudinit/config/cc_timezone.py
+++ b/cloudinit/config/cc_timezone.py
@@ -18,6 +18,26 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Timezone
+--------
+**Summary:** set system timezone
+
+Set the system timezone. If any args are passed to the module then the first
+will be used for the timezone. Otherwise, the module will attempt to retrieve
+the timezone from cloud config.
+
+**Internal name:** ``cc_timezone``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ timezone: <timezone>
+"""
+
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
diff --git a/cloudinit/config/cc_ubuntu_init_switch.py b/cloudinit/config/cc_ubuntu_init_switch.py
index bffb4380..31a96e4a 100644
--- a/cloudinit/config/cc_ubuntu_init_switch.py
+++ b/cloudinit/config/cc_ubuntu_init_switch.py
@@ -17,27 +17,33 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+Ubuntu Init Switch
+------------------
**Summary:** reboot system into another init.
-**Description:** This module provides a way for the user to boot with systemd
-even if the image is set to boot with upstart. It should be run as one of the
-first ``cloud_init_modules``, and will switch the init system and then issue a
-reboot. The next boot will come up in the target init system and no action will
-be taken.
+This module provides a way for the user to boot with systemd even if the image
+is set to boot with upstart. It should be run as one of the first
+``cloud_init_modules``, and will switch the init system and then issue a
+reboot. The next boot will come up in the target init system and no action
+will be taken. This should be inert on non-ubuntu systems, and also
+exit quickly.
-This should be inert on non-ubuntu systems, and also exit quickly.
+.. note::
+ best effort is made, but it's possible this system will break, and probably
+ won't interact well with any other mechanism you've used to switch the init
+ system.
+
+**Internal name:** ``cc_ubuntu_init_switch``
+
+**Module frequency:** once per instance
+
+**Supported distros:** ubuntu
-It can be configured with the following option structure::
+**Config keys**::
init_switch:
target: systemd (can be 'systemd' or 'upstart')
reboot: true (reboot if a change was made, or false to not reboot)
-
-.. note::
-
- Best effort is made, but it's possible
- this system will break, and probably won't interact well with any other
- mechanism you've used to switch the init system.
"""
from cloudinit.distros import ubuntu
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index 15703efe..3fcb2652 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -18,6 +18,49 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Update Etc Hosts
+----------------
+**Summary:** update ``/etc/hosts``
+
+This module will update the contents of ``/etc/hosts`` based on the
+hostname/fqdn specified in config. Management of ``/etc/hosts`` is controlled
+using ``manage_etc_hosts``. If this is set to false, cloud-init will not manage
+``/etc/hosts`` at all. This is the default behavior.
+
+If set to ``true`` or ``template``, cloud-init will generate ``/etc/hosts``
+using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the
+``/etc/cloud/templates/hosts.tmpl`` template, the strings ``$hostname`` and
+``$fqdn`` will be replaced with the hostname and fqdn respectively.
+
+If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will not
+rewrite ``/etc/hosts`` entirely, but rather will ensure that an entry for the
+fqdn with ip ``127.0.1.1`` is present in ``/etc/hosts`` (i.e.
+``ping <hostname>`` will ping ``127.0.1.1``).
+
+.. note::
+ if ``manage_etc_hosts`` is set ``true`` or ``template``, the contents
+ of ``/etc/hosts`` will be updated every boot. To make any changes to
+ ``/etc/hosts`` persistent they must be made in
+ ``/etc/cloud/templates/hosts.tmpl``
+
+.. note::
+ for instructions on specifying hostname and fqdn, see documentation for
+ ``cc_set_hostname``
+
+**Internal name:** ``cc_update_etc_hosts``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
+
+ manage_etc_hosts: <true/"template"/false/"localhost">
+ fqdn: <fqdn>
+ hostname: <fqdn/hostname>
+"""
+
from cloudinit import templater
from cloudinit import util
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index 5b78afe1..315b7b6f 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -18,6 +18,31 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Update Hostname
+---------------
+**Summary:** update hostname and fqdn
+
+This module will update the system hostname and fqdn. If ``preserve_hostname``
+is set, then the hostname will not be altered.
+
+.. note::
+ for instructions on specifying hostname and fqdn, see documentation for
+ ``cc_set_hostname``
+
+**Internal name:** ``cc_update_hostname``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
+
+ preserve_hostname: <true/false>
+ fqdn: <fqdn>
+ hostname: <fqdn/hostname>
+"""
+
import os
from cloudinit.settings import PER_ALWAYS
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index bf5b4581..36352362 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -16,10 +16,90 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Users and Groups
+----------------
+**Summary:** configure users and groups
+
+This module configures users and groups. For more detailed information on user
+options, see the ``Including users and groups`` config example.
+
+Groups to add to the system can be specified as a list under the ``groups``
+key. Each entry in the list should either contain the group name as a string,
+or a dictionary with the group name as the key and a list of users who should
+be members of the group as the value.
+
+The ``users`` config key takes a list of users to configure. The first entry in
+this list is used as the default user for the system. To preserve the standard
+default user for the distro, the string ``default`` may be used as the first
+entry of the ``users`` list. Each entry in the ``users`` list, other than a
+``default`` entry, should be a dictionary of options for the user. Supported
+config keys for an entry in ``users`` are as follows:
+
+ - ``name``: The user's login name
+ - ``homedir``: Optional. Home dir for user. Default is ``/home/<username>``
+ - ``primary-group``: Optional. Primary group for user. Default to new group
+ named after user.
+ - ``groups``: Optional. Additional groups to add the user to. Default: none
+ - ``selinux-user``: Optional. SELinux user for user's login. Default to
+ default SELinux user.
+ - ``lock_passwd``: Optional. Disable password login. Default: true
+ - ``inactive``: Optional. Mark user inactive. Default: false
+ - ``passwd``: Hash of user password
+ - ``no-create-home``: Optional. Do not create home directory. Default:
+ false
+ - ``no-user-group``: Optional. Do not create group named after user.
+ Default: false
+ - ``no-log-init``: Optional. Do not initialize lastlog and faillog for
+ user. Default: false
+ - ``ssh-import-id``: Optional. SSH id to import for user. Default: none
+ - ``ssh-authorized-keys``: Optional. List of ssh keys to add to user's
+ authkeys file. Default: none
+ - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use.
+ Default: none.
+ - ``system``: Optional. Create user as system user with no home directory.
+ Default: false
+
+.. note::
+ Specifying a hash of a user's password with ``passwd`` is a security risk
+ if the cloud-config can be intercepted. SSH authentication is preferred.
+
+.. note::
+ If specifying a sudo rule for a user, ensure that the syntax for the rule
+ is valid, as it is not checked by cloud-init.
+
+**Internal name:** ``cc_users_groups``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ groups:
+ - ubuntu: [foo, bar]
+ - cloud-users
+
+ users:
+ - default
+ - name: <username>
+ gecos: <real name>
+ primary-group: <primary group>
+ groups: <additional groups>
+ selinux-user: <selinux username>
+ expiredate: <date>
+ ssh-import-id: <none/id>
+ lock_passwd: <true/false>
+ passwd: <password>
+ sudo: <sudo config>
+ inactive: <true/false>
+ system: <true/false>
+"""
+
# Ensure this is aliased to a name not 'distros'
# since the module attribute 'distros'
# is a list of distros that are supported, not a sub-module
-from cloudinit import distros as ds
+from cloudinit.distros import ug_util
from cloudinit.settings import PER_INSTANCE
@@ -27,7 +107,7 @@ frequency = PER_INSTANCE
def handle(name, cfg, cloud, _log, _args):
- (users, groups) = ds.normalize_users_groups(cfg, cloud.distro)
+ (users, groups) = ug_util.normalize_users_groups(cfg, cloud.distro)
for (name, members) in groups.items():
cloud.distro.create_group(name, members)
for (user, config) in users.items():
diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index b1096b9b..b5956bda 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -16,6 +16,48 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Write Files
+-----------
+**Summary:** write arbitrary files
+
+Write out arbitrary content to files, optionally setting permissions. Content
+can be specified in plain text or binary. Data encoded with either base64 or
+binary gzip data can be specified and will be decoded before being written.
+
+.. note::
+ if multiline data is provided, care should be taken to ensure that it
+ follows yaml formatting standards. To specify binary data, use the yaml
+ option ``!!binary``
+
+**Internal name:** ``cc_write_files``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+ write_files:
+ - encoding: b64
+ content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
+ owner: root:root
+ path: /etc/sysconfig/selinux
+ permissions: '0644'
+ - content: |
+ # My new /etc/sysconfig/samba file
+
+ SMDBOPTIONS="-D"
+ path: /etc/sysconfig/samba
+ - content: !!binary |
+ f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAA
+ AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAA
+ AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAAB
+ ...
+ path: /bin/arch
+ permissions: '0555'
+"""
+
import base64
import os
import six
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 22549e62..eb94c1f3 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -16,6 +16,32 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+Yum Add Repo
+------------
+**Summary:** add yum repository configuration to the system
+
+Add yum repository configuration to ``/etc/yum.repos.d``. Configuration files
+are named based on the dictionary key under the ``yum_repos`` they are
+specified with. If a config file already exists with the same name as a config
+entry, the config entry will be skipped.
+
+**Internal name:** ``cc_yum_add_repo``
+
+**Module frequency:** per always
+
+**Supported distros:** fedora, rhel
+
+**Config keys**::
+
+ yum_repos:
+ <repo-name>:
+ baseurl: <repo url>
+ name: <repo name>
+ enabled: <true/false>
+ # any repository configuration options (see man yum.conf)
+"""
+
import os
import configobj
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index b1192e84..4a726430 100644..100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -367,6 +367,9 @@ class Distro(object):
adduser_cmd = ['useradd', name]
log_adduser_cmd = ['useradd', name]
+ if util.system_is_snappy():
+ adduser_cmd.append('--extrausers')
+ log_adduser_cmd.append('--extrausers')
# Since we are creating users, we want to carefully validate the
# inputs. If something goes wrong, we can end up with a system
@@ -445,6 +448,32 @@ class Distro(object):
util.logexc(LOG, "Failed to create user %s", name)
raise e
+ def add_snap_user(self, name, **kwargs):
+ """
+ Add a snappy user to the system using snappy tools
+ """
+
+ snapuser = kwargs.get('snapuser')
+ known = kwargs.get('known', False)
+ adduser_cmd = ["snap", "create-user", "--sudoer", "--json"]
+ if known:
+ adduser_cmd.append("--known")
+ adduser_cmd.append(snapuser)
+
+ # Run the command
+ LOG.debug("Adding snap user %s", name)
+ try:
+ (out, err) = util.subp(adduser_cmd, logstring=adduser_cmd,
+ capture=True)
+ LOG.debug("snap create-user returned: %s:%s", out, err)
+ jobj = util.load_json(out)
+ username = jobj.get('username', None)
+ except Exception as e:
+ util.logexc(LOG, "Failed to create snap user %s", name)
+ raise e
+
+ return username
+
def create_user(self, name, **kwargs):
"""
Creates users for the system using the GNU passwd tools. This
@@ -452,6 +481,10 @@ class Distro(object):
distros where useradd is not desirable or not available.
"""
+ # Add a snap user, if requested
+ if 'snapuser' in kwargs:
+ return self.add_snap_user(name, **kwargs)
+
# Add the user
self.add_user(name, **kwargs)
@@ -602,6 +635,8 @@ class Distro(object):
def create_group(self, name, members=None):
group_add_cmd = ['groupadd', name]
+ if util.system_is_snappy():
+ group_add_cmd.append('--extrausers')
if not members:
members = []
@@ -685,275 +720,6 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
return default
-# Normalizes a input group configuration
-# which can be a comma seperated list of
-# group names, or a list of group names
-# or a python dictionary of group names
-# to a list of members of that group.
-#
-# The output is a dictionary of group
-# names => members of that group which
-# is the standard form used in the rest
-# of cloud-init
-def _normalize_groups(grp_cfg):
- if isinstance(grp_cfg, six.string_types):
- grp_cfg = grp_cfg.strip().split(",")
- if isinstance(grp_cfg, list):
- c_grp_cfg = {}
- for i in grp_cfg:
- if isinstance(i, dict):
- for k, v in i.items():
- if k not in c_grp_cfg:
- if isinstance(v, list):
- c_grp_cfg[k] = list(v)
- elif isinstance(v, six.string_types):
- c_grp_cfg[k] = [v]
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
- else:
- if isinstance(v, list):
- c_grp_cfg[k].extend(v)
- elif isinstance(v, six.string_types):
- c_grp_cfg[k].append(v)
- else:
- raise TypeError("Bad group member type %s" %
- type_utils.obj_name(v))
- elif isinstance(i, six.string_types):
- if i not in c_grp_cfg:
- c_grp_cfg[i] = []
- else:
- raise TypeError("Unknown group name type %s" %
- type_utils.obj_name(i))
- grp_cfg = c_grp_cfg
- groups = {}
- if isinstance(grp_cfg, dict):
- for (grp_name, grp_members) in grp_cfg.items():
- groups[grp_name] = util.uniq_merge_sorted(grp_members)
- else:
- raise TypeError(("Group config must be list, dict "
- " or string types only and not %s") %
- type_utils.obj_name(grp_cfg))
- return groups
-
-
-# Normalizes a input group configuration
-# which can be a comma seperated list of
-# user names, or a list of string user names
-# or a list of dictionaries with components
-# that define the user config + 'name' (if
-# a 'name' field does not exist then the
-# default user is assumed to 'own' that
-# configuration.
-#
-# The output is a dictionary of user
-# names => user config which is the standard
-# form used in the rest of cloud-init. Note
-# the default user will have a special config
-# entry 'default' which will be marked as true
-# all other users will be marked as false.
-def _normalize_users(u_cfg, def_user_cfg=None):
- if isinstance(u_cfg, dict):
- ad_ucfg = []
- for (k, v) in u_cfg.items():
- if isinstance(v, (bool, int, float) + six.string_types):
- if util.is_true(v):
- ad_ucfg.append(str(k))
- elif isinstance(v, dict):
- v['name'] = k
- ad_ucfg.append(v)
- else:
- raise TypeError(("Unmappable user value type %s"
- " for key %s") % (type_utils.obj_name(v), k))
- u_cfg = ad_ucfg
- elif isinstance(u_cfg, six.string_types):
- u_cfg = util.uniq_merge_sorted(u_cfg)
-
- users = {}
- for user_config in u_cfg:
- if isinstance(user_config, (list,) + six.string_types):
- for u in util.uniq_merge(user_config):
- if u and u not in users:
- users[u] = {}
- elif isinstance(user_config, dict):
- if 'name' in user_config:
- n = user_config.pop('name')
- prev_config = users.get(n) or {}
- users[n] = util.mergemanydict([prev_config,
- user_config])
- else:
- # Assume the default user then
- prev_config = users.get('default') or {}
- users['default'] = util.mergemanydict([prev_config,
- user_config])
- else:
- raise TypeError(("User config must be dictionary/list "
- " or string types only and not %s") %
- type_utils.obj_name(user_config))
-
- # Ensure user options are in the right python friendly format
- if users:
- c_users = {}
- for (uname, uconfig) in users.items():
- c_uconfig = {}
- for (k, v) in uconfig.items():
- k = k.replace('-', '_').strip()
- if k:
- c_uconfig[k] = v
- c_users[uname] = c_uconfig
- users = c_users
-
- # Fixup the default user into the real
- # default user name and replace it...
- def_user = None
- if users and 'default' in users:
- def_config = users.pop('default')
- if def_user_cfg:
- # Pickup what the default 'real name' is
- # and any groups that are provided by the
- # default config
- def_user_cfg = def_user_cfg.copy()
- def_user = def_user_cfg.pop('name')
- def_groups = def_user_cfg.pop('groups', [])
- # Pickup any config + groups for that user name
- # that we may have previously extracted
- parsed_config = users.pop(def_user, {})
- parsed_groups = parsed_config.get('groups', [])
- # Now merge our extracted groups with
- # anything the default config provided
- users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
- parsed_config['groups'] = ",".join(users_groups)
- # The real config for the default user is the
- # combination of the default user config provided
- # by the distro, the default user config provided
- # by the above merging for the user 'default' and
- # then the parsed config from the user's 'real name'
- # which does not have to be 'default' (but could be)
- users[def_user] = util.mergemanydict([def_user_cfg,
- def_config,
- parsed_config])
-
- # Ensure that only the default user that we
- # found (if any) is actually marked as being
- # the default user
- if users:
- for (uname, uconfig) in users.items():
- if def_user and uname == def_user:
- uconfig['default'] = True
- else:
- uconfig['default'] = False
-
- return users
-
-
-# Normalizes a set of user/users and group
-# dictionary configuration into a useable
-# format that the rest of cloud-init can
-# understand using the default user
-# provided by the input distrobution (if any)
-# to allow for mapping of the 'default' user.
-#
-# Output is a dictionary of group names -> [member] (list)
-# and a dictionary of user names -> user configuration (dict)
-#
-# If 'user' exists it will override
-# the 'users'[0] entry (if a list) otherwise it will
-# just become an entry in the returned dictionary (no override)
-def normalize_users_groups(cfg, distro):
- if not cfg:
- cfg = {}
-
- users = {}
- groups = {}
- if 'groups' in cfg:
- groups = _normalize_groups(cfg['groups'])
-
- # Handle the previous style of doing this where the first user
- # overrides the concept of the default user if provided in the user: XYZ
- # format.
- old_user = {}
- if 'user' in cfg and cfg['user']:
- old_user = cfg['user']
- # Translate it into the format that is more useful
- # going forward
- if isinstance(old_user, six.string_types):
- old_user = {
- 'name': old_user,
- }
- if not isinstance(old_user, dict):
- LOG.warn(("Format for 'user' key must be a string or "
- "dictionary and not %s"), type_utils.obj_name(old_user))
- old_user = {}
-
- # If no old user format, then assume the distro
- # provides what the 'default' user maps to, but notice
- # that if this is provided, we won't automatically inject
- # a 'default' user into the users list, while if a old user
- # format is provided we will.
- distro_user_config = {}
- try:
- distro_user_config = distro.get_default_user()
- except NotImplementedError:
- LOG.warn(("Distro has not implemented default user "
- "access. No distribution provided default user"
- " will be normalized."))
-
- # Merge the old user (which may just be an empty dict when not
- # present with the distro provided default user configuration so
- # that the old user style picks up all the distribution specific
- # attributes (if any)
- default_user_config = util.mergemanydict([old_user, distro_user_config])
-
- base_users = cfg.get('users', [])
- if not isinstance(base_users, (list, dict) + six.string_types):
- LOG.warn(("Format for 'users' key must be a comma separated string"
- " or a dictionary or a list and not %s"),
- type_utils.obj_name(base_users))
- base_users = []
-
- if old_user:
- # Ensure that when user: is provided that this user
- # always gets added (as the default user)
- if isinstance(base_users, list):
- # Just add it on at the end...
- base_users.append({'name': 'default'})
- elif isinstance(base_users, dict):
- base_users['default'] = dict(base_users).get('default', True)
- elif isinstance(base_users, six.string_types):
- # Just append it on to be re-parsed later
- base_users += ",default"
-
- users = _normalize_users(base_users, default_user_config)
- return (users, groups)
-
-
-# Given a user dictionary config it will
-# extract the default user name and user config
-# from that list and return that tuple or
-# return (None, None) if no default user is
-# found in the given input
-def extract_default(users, default_name=None, default_config=None):
- if not users:
- users = {}
-
- def safe_find(entry):
- config = entry[1]
- if not config or 'default' not in config:
- return False
- else:
- return config['default']
-
- tmp_users = users.items()
- tmp_users = dict(filter(safe_find, tmp_users))
- if not tmp_users:
- return (default_name, default_config)
- else:
- name = list(tmp_users)[0]
- config = tmp_users[name]
- config.pop('default', None)
- return (name, config)
-
-
def fetch(name):
locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])
if not locs:
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 1aa42d75..e574e1b9 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -62,6 +62,7 @@ class Distro(distros.Distro):
self._runner = helpers.Runners(paths)
self.osfamily = 'redhat'
self._net_renderer = sysconfig.Renderer()
+ cfg['ssh_svcname'] = 'sshd'
def install_packages(self, pkglist):
self.package_command('install', pkgs=pkglist)
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
new file mode 100755
index 00000000..99301530
--- /dev/null
+++ b/cloudinit/distros/ug_util.py
@@ -0,0 +1,299 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import six
+
+from cloudinit import log as logging
+from cloudinit import type_utils
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+# Normalizes an input group configuration
+# which can be a comma separated list of
+# group names, or a list of group names
+# or a python dictionary of group names
+# to a list of members of that group.
+#
+# The output is a dictionary of group
+# names => members of that group which
+# is the standard form used in the rest
+# of cloud-init
+def _normalize_groups(grp_cfg):
+ if isinstance(grp_cfg, six.string_types):
+ grp_cfg = grp_cfg.strip().split(",")
+ if isinstance(grp_cfg, list):
+ c_grp_cfg = {}
+ for i in grp_cfg:
+ if isinstance(i, dict):
+ for k, v in i.items():
+ if k not in c_grp_cfg:
+ if isinstance(v, list):
+ c_grp_cfg[k] = list(v)
+ elif isinstance(v, six.string_types):
+ c_grp_cfg[k] = [v]
+ else:
+ raise TypeError("Bad group member type %s" %
+ type_utils.obj_name(v))
+ else:
+ if isinstance(v, list):
+ c_grp_cfg[k].extend(v)
+ elif isinstance(v, six.string_types):
+ c_grp_cfg[k].append(v)
+ else:
+ raise TypeError("Bad group member type %s" %
+ type_utils.obj_name(v))
+ elif isinstance(i, six.string_types):
+ if i not in c_grp_cfg:
+ c_grp_cfg[i] = []
+ else:
+ raise TypeError("Unknown group name type %s" %
+ type_utils.obj_name(i))
+ grp_cfg = c_grp_cfg
+ groups = {}
+ if isinstance(grp_cfg, dict):
+ for (grp_name, grp_members) in grp_cfg.items():
+ groups[grp_name] = util.uniq_merge_sorted(grp_members)
+ else:
+ raise TypeError(("Group config must be list, dict "
+ " or string types only and not %s") %
+ type_utils.obj_name(grp_cfg))
+ return groups
+
+
+# Normalizes an input user configuration
+# which can be a comma separated list of
+# user names, or a list of string user names
+# or a list of dictionaries with components
+# that define the user config + 'name' (if
+# a 'name' field does not exist then the
+# default user is assumed to 'own' that
+# configuration.
+#
+# The output is a dictionary of user
+# names => user config which is the standard
+# form used in the rest of cloud-init. Note
+# the default user will have a special config
+# entry 'default' which will be marked as true
+# all other users will be marked as false.
+def _normalize_users(u_cfg, def_user_cfg=None):
+ if isinstance(u_cfg, dict):
+ ad_ucfg = []
+ for (k, v) in u_cfg.items():
+ if isinstance(v, (bool, int, float) + six.string_types):
+ if util.is_true(v):
+ ad_ucfg.append(str(k))
+ elif isinstance(v, dict):
+ v['name'] = k
+ ad_ucfg.append(v)
+ else:
+ raise TypeError(("Unmappable user value type %s"
+ " for key %s") % (type_utils.obj_name(v), k))
+ u_cfg = ad_ucfg
+ elif isinstance(u_cfg, six.string_types):
+ u_cfg = util.uniq_merge_sorted(u_cfg)
+
+ users = {}
+ for user_config in u_cfg:
+ if isinstance(user_config, (list,) + six.string_types):
+ for u in util.uniq_merge(user_config):
+ if u and u not in users:
+ users[u] = {}
+ elif isinstance(user_config, dict):
+ if 'name' in user_config:
+ n = user_config.pop('name')
+ prev_config = users.get(n) or {}
+ users[n] = util.mergemanydict([prev_config,
+ user_config])
+ else:
+ # Assume the default user then
+ prev_config = users.get('default') or {}
+ users['default'] = util.mergemanydict([prev_config,
+ user_config])
+ else:
+ raise TypeError(("User config must be dictionary/list "
+ " or string types only and not %s") %
+ type_utils.obj_name(user_config))
+
+ # Ensure user options are in the right python friendly format
+ if users:
+ c_users = {}
+ for (uname, uconfig) in users.items():
+ c_uconfig = {}
+ for (k, v) in uconfig.items():
+ k = k.replace('-', '_').strip()
+ if k:
+ c_uconfig[k] = v
+ c_users[uname] = c_uconfig
+ users = c_users
+
+ # Fixup the default user into the real
+ # default user name and replace it...
+ def_user = None
+ if users and 'default' in users:
+ def_config = users.pop('default')
+ if def_user_cfg:
+ # Pickup what the default 'real name' is
+ # and any groups that are provided by the
+ # default config
+ def_user_cfg = def_user_cfg.copy()
+ def_user = def_user_cfg.pop('name')
+ def_groups = def_user_cfg.pop('groups', [])
+ # Pickup any config + groups for that user name
+ # that we may have previously extracted
+ parsed_config = users.pop(def_user, {})
+ parsed_groups = parsed_config.get('groups', [])
+ # Now merge our extracted groups with
+ # anything the default config provided
+ users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
+ parsed_config['groups'] = ",".join(users_groups)
+ # The real config for the default user is the
+ # combination of the default user config provided
+ # by the distro, the default user config provided
+ # by the above merging for the user 'default' and
+ # then the parsed config from the user's 'real name'
+ # which does not have to be 'default' (but could be)
+ users[def_user] = util.mergemanydict([def_user_cfg,
+ def_config,
+ parsed_config])
+
+ # Ensure that only the default user that we
+ # found (if any) is actually marked as being
+ # the default user
+ if users:
+ for (uname, uconfig) in users.items():
+ if def_user and uname == def_user:
+ uconfig['default'] = True
+ else:
+ uconfig['default'] = False
+
+ return users
+
+
+# Normalizes a set of user/users and group
+# dictionary configuration into a usable
+# format that the rest of cloud-init can
+# understand using the default user
+# provided by the input distribution (if any)
+# to allow for mapping of the 'default' user.
+#
+# Output is a dictionary of group names -> [member] (list)
+# and a dictionary of user names -> user configuration (dict)
+#
+# If 'user' exists it will override
+# the 'users'[0] entry (if a list) otherwise it will
+# just become an entry in the returned dictionary (no override)
+def normalize_users_groups(cfg, distro):
+ if not cfg:
+ cfg = {}
+
+ users = {}
+ groups = {}
+ if 'groups' in cfg:
+ groups = _normalize_groups(cfg['groups'])
+
+ # Handle the previous style of doing this where the first user
+ # overrides the concept of the default user if provided in the user: XYZ
+ # format.
+ old_user = {}
+ if 'user' in cfg and cfg['user']:
+ old_user = cfg['user']
+ # Translate it into the format that is more useful
+ # going forward
+ if isinstance(old_user, six.string_types):
+ old_user = {
+ 'name': old_user,
+ }
+ if not isinstance(old_user, dict):
+ LOG.warn(("Format for 'user' key must be a string or "
+ "dictionary and not %s"), type_utils.obj_name(old_user))
+ old_user = {}
+
+ # If no old user format, then assume the distro
+ # provides what the 'default' user maps to, but notice
+ # that if this is provided, we won't automatically inject
+ # a 'default' user into the users list, while if a old user
+ # format is provided we will.
+ distro_user_config = {}
+ try:
+ distro_user_config = distro.get_default_user()
+ except NotImplementedError:
+ LOG.warn(("Distro has not implemented default user "
+ "access. No distribution provided default user"
+ " will be normalized."))
+
+ # Merge the old user (which may just be an empty dict when not
+ # present with the distro provided default user configuration so
+ # that the old user style picks up all the distribution specific
+ # attributes (if any)
+ default_user_config = util.mergemanydict([old_user, distro_user_config])
+
+ base_users = cfg.get('users', [])
+ if not isinstance(base_users, (list, dict) + six.string_types):
+ LOG.warn(("Format for 'users' key must be a comma separated string"
+ " or a dictionary or a list and not %s"),
+ type_utils.obj_name(base_users))
+ base_users = []
+
+ if old_user:
+ # Ensure that when user: is provided that this user
+ # always gets added (as the default user)
+ if isinstance(base_users, list):
+ # Just add it on at the end...
+ base_users.append({'name': 'default'})
+ elif isinstance(base_users, dict):
+ base_users['default'] = dict(base_users).get('default', True)
+ elif isinstance(base_users, six.string_types):
+ # Just append it on to be re-parsed later
+ base_users += ",default"
+
+ users = _normalize_users(base_users, default_user_config)
+ return (users, groups)
+
+
+# Given a user dictionary config it will
+# extract the default user name and user config
+# from that list and return that tuple or
+# return (None, None) if no default user is
+# found in the given input
+def extract_default(users, default_name=None, default_config=None):
+ if not users:
+ users = {}
+
+ def safe_find(entry):
+ config = entry[1]
+ if not config or 'default' not in config:
+ return False
+ else:
+ return config['default']
+
+ tmp_users = users.items()
+ tmp_users = dict(filter(safe_find, tmp_users))
+ if not tmp_users:
+ return (default_name, default_config)
+ else:
+ name = list(tmp_users)[0]
+ config = tmp_users[name]
+ config.pop('default', None)
+ return (name, config)
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 822a020b..4075a279 100644
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -66,7 +66,9 @@ def _klibc_to_config_entry(content, mac_addrs=None):
provided here. There is no good documentation on this unfortunately.
DEVICE=<name> is expected/required and PROTO should indicate if
- this is 'static' or 'dhcp'.
+ this is 'static' or 'dhcp' or 'dhcp6' (LP: #1621507).
+ note that IPV6PROTO is also written by newer code to address the
+ possibility of both ipv4 and ipv6 getting addresses.
"""
if mac_addrs is None:
@@ -74,19 +76,20 @@ def _klibc_to_config_entry(content, mac_addrs=None):
data = _load_shell_content(content)
try:
- name = data['DEVICE']
+ name = data['DEVICE'] if 'DEVICE' in data else data['DEVICE6']
except KeyError:
- raise ValueError("no 'DEVICE' entry in data")
+ raise ValueError("no 'DEVICE' or 'DEVICE6' entry in data")
# ipconfig on precise does not write PROTO
- proto = data.get('PROTO')
+ # IPv6 config gives us IPV6PROTO, not PROTO.
+ proto = data.get('PROTO', data.get('IPV6PROTO'))
if not proto:
if data.get('filename'):
proto = 'dhcp'
else:
proto = 'static'
- if proto not in ('static', 'dhcp'):
+ if proto not in ('static', 'dhcp', 'dhcp6'):
raise ValueError("Unexpected value for PROTO: %s" % proto)
iface = {
@@ -98,12 +101,15 @@ def _klibc_to_config_entry(content, mac_addrs=None):
if name in mac_addrs:
iface['mac_address'] = mac_addrs[name]
- # originally believed there might be IPV6* values
- for v, pre in (('ipv4', 'IPV4'),):
+ # Handle both IPv4 and IPv6 values
+ for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')):
# if no IPV4ADDR or IPV6ADDR, then go on.
if pre + "ADDR" not in data:
continue
- subnet = {'type': proto, 'control': 'manual'}
+
+ # PROTO for ipv4, IPV6PROTO for ipv6
+ cur_proto = data.get(pre + 'PROTO', proto)
+ subnet = {'type': cur_proto, 'control': 'manual'}
# these fields go right on the subnet
for key in ('NETMASK', 'BROADCAST', 'GATEWAY'):
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 8c258ea1..a9682716 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -32,6 +32,7 @@ CFG_BUILTIN = {
'NoCloud',
'ConfigDrive',
'OpenNebula',
+ 'DigitalOcean',
'Azure',
'AltCloud',
'OVF',
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
new file mode 100644
index 00000000..19957212
--- /dev/null
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -0,0 +1,49 @@
+# vi: ts=4 expandtab
+
+import os
+
+from cloudinit import sources
+from cloudinit.sources import DataSourceEc2 as EC2
+
+DEF_MD_VERSION = "2016-01-01"
+
+
+class DataSourceAliYun(EC2.DataSourceEc2):
+ metadata_urls = ["http://100.100.100.200"]
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths)
+ self.seed_dir = os.path.join(paths.seed_dir, "AliYun")
+ self.api_ver = DEF_MD_VERSION
+
+ def get_hostname(self, fqdn=False, _resolve_ip=False):
+ return self.metadata.get('hostname', 'localhost.localdomain')
+
+ def get_public_ssh_keys(self):
+ return parse_public_keys(self.metadata.get('public-keys', {}))
+
+
+def parse_public_keys(public_keys):
+ keys = []
+ for key_id, key_body in public_keys.items():
+ if isinstance(key_body, str):
+ keys.append(key_body.strip())
+ elif isinstance(key_body, list):
+ keys.extend(key_body)
+ elif isinstance(key_body, dict):
+ key = key_body.get('openssh-key', [])
+ if isinstance(key, str):
+ keys.append(key.strip())
+ elif isinstance(key, list):
+ keys.extend(key)
+ return keys
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceAliYun, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 48136f7c..20345389 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -195,8 +195,7 @@ class DataSourceAltCloud(sources.DataSource):
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- _err.message)
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
return False
except OSError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
@@ -211,8 +210,7 @@ class DataSourceAltCloud(sources.DataSource):
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- _err.message)
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
return False
except OSError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index dbc2bb68..b802b03e 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -252,7 +252,7 @@ class DataSourceAzureNet(sources.DataSource):
cc_modules_override = support_new_ephemeral(self.sys_cfg)
if cc_modules_override:
- self.cfg['cloud_config_modules'] = cc_modules_override
+ self.cfg['cloud_init_modules'] = cc_modules_override
return True
@@ -283,11 +283,14 @@ def find_fabric_formatted_ephemeral_part():
device_location = potential_location
break
if device_location is None:
+ LOG.debug("no azure resource disk partition path found")
return None
ntfs_devices = util.find_devs_with("TYPE=ntfs")
real_device = os.path.realpath(device_location)
if real_device in ntfs_devices:
return device_location
+ LOG.debug("'%s' existed (%s) but was not ntfs formated",
+ device_location, real_device)
return None
@@ -342,7 +345,7 @@ def support_new_ephemeral(cfg):
LOG.debug("cloud-init will format ephemeral0.1 this boot.")
LOG.debug("setting disk_setup and mounts modules 'always' for this boot")
- cc_modules = cfg.get('cloud_config_modules')
+ cc_modules = cfg.get('cloud_init_modules')
if not cc_modules:
return None
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index fc596e17..c5770d5d 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -18,13 +18,12 @@
# DigitalOcean Droplet API:
# https://developers.digitalocean.com/documentation/metadata/
-import json
-
from cloudinit import log as logging
from cloudinit import sources
-from cloudinit import url_helper
from cloudinit import util
+import cloudinit.sources.helpers.digitalocean as do_helper
+
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
@@ -36,11 +35,13 @@ BUILTIN_DS_CONFIG = {
MD_RETRIES = 30
MD_TIMEOUT = 2
MD_WAIT_RETRY = 2
+MD_USE_IPV4LL = True
class DataSourceDigitalOcean(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.distro = distro
self.metadata = dict()
self.ds_cfg = util.mergemanydict([
util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
@@ -48,80 +49,72 @@ class DataSourceDigitalOcean(sources.DataSource):
self.metadata_address = self.ds_cfg['metadata_url']
self.retries = self.ds_cfg.get('retries', MD_RETRIES)
self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
+ self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL)
self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
+ self._network_config = None
def _get_sysinfo(self):
- # DigitalOcean embeds vendor ID and instance/droplet_id in the
- # SMBIOS information
-
- LOG.debug("checking if instance is a DigitalOcean droplet")
-
- # Detect if we are on DigitalOcean and return the Droplet's ID
- vendor_name = util.read_dmi_data("system-manufacturer")
- if vendor_name != "DigitalOcean":
- return (False, None)
+ return do_helper.read_sysinfo()
- LOG.info("running on DigitalOcean")
-
- droplet_id = util.read_dmi_data("system-serial-number")
- if droplet_id:
- LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet"
- "{}").format(droplet_id))
- else:
- LOG.critical(("system identified via SMBIOS as a DigitalOcean "
- "Droplet, but did not provide an ID. Please file a "
- "support ticket at: "
- "https://cloud.digitalocean.com/support/tickets/"
- "new"))
-
- return (True, droplet_id)
-
- def get_data(self, apply_filter=False):
+ def get_data(self):
(is_do, droplet_id) = self._get_sysinfo()
# only proceed if we know we are on DigitalOcean
if not is_do:
return False
- LOG.debug("reading metadata from {}".format(self.metadata_address))
- response = url_helper.readurl(self.metadata_address,
- timeout=self.timeout,
- sec_between=self.wait_retry,
- retries=self.retries)
+ LOG.info("Running on digital ocean. droplet_id=%s" % droplet_id)
- contents = util.decode_binary(response.contents)
- decoded = json.loads(contents)
+ ipv4LL_nic = None
+ if self.use_ip4LL:
+ ipv4LL_nic = do_helper.assign_ipv4_link_local()
- self.metadata = decoded
- self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id)
- self.metadata['local-hostname'] = decoded.get('hostname', droplet_id)
- self.vendordata_raw = decoded.get("vendor_data", None)
- self.userdata_raw = decoded.get("user_data", None)
- return True
+ md = do_helper.read_metadata(
+ self.metadata_address, timeout=self.timeout,
+ sec_between=self.wait_retry, retries=self.retries)
- def get_public_ssh_keys(self):
- public_keys = self.metadata.get('public_keys', [])
- if isinstance(public_keys, list):
- return public_keys
- else:
- return [public_keys]
+ self.metadata_full = md
+ self.metadata['instance-id'] = md.get('droplet_id', droplet_id)
+ self.metadata['local-hostname'] = md.get('hostname', droplet_id)
+ self.metadata['interfaces'] = md.get('interfaces')
+ self.metadata['public-keys'] = md.get('public_keys')
+ self.metadata['availability_zone'] = md.get('region', 'default')
+ self.vendordata_raw = md.get("vendor_data", None)
+ self.userdata_raw = md.get("user_data", None)
- @property
- def availability_zone(self):
- return self.metadata.get('region', 'default')
+ if ipv4LL_nic:
+ do_helper.del_ipv4_link_local(ipv4LL_nic)
- @property
- def launch_index(self):
- return None
+ return True
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
self.get_instance_id(), 'system-serial-number')
+ @property
+ def network_config(self):
+ """Configure the networking. This needs to be done each boot, since
+ the IP information may have changed due to snapshot and/or
+ migration.
+ """
+
+ if self._network_config:
+ return self._network_config
+
+ interfaces = self.metadata.get('interfaces')
+ LOG.debug(interfaces)
+ if not interfaces:
+ raise Exception("Unable to get meta-data from server....")
+
+ nameservers = self.metadata_full['dns']['nameservers']
+ self._network_config = do_helper.convert_network_configuration(
+ interfaces, nameservers)
+ return self._network_config
+
# Used to match classes to dependencies
datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )),
]
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 6fe2a0bb..bc84ef5d 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -31,21 +31,19 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
-DEF_MD_URL = "http://169.254.169.254"
-
# Which version we are requesting of the ec2 metadata apis
DEF_MD_VERSION = '2009-04-04'
-# Default metadata urls that will be used if none are provided
-# They will be checked for 'resolveability' and some of the
-# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
-
class DataSourceEc2(sources.DataSource):
+ # Default metadata urls that will be used if none are provided
+ # They will be checked for 'resolveability' and some of the
+ # following may be discarded if they do not resolve
+ metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata_address = DEF_MD_URL
+ self.metadata_address = None
self.seed_dir = os.path.join(paths.seed_dir, "ec2")
self.api_ver = DEF_MD_VERSION
@@ -106,7 +104,7 @@ class DataSourceEc2(sources.DataSource):
return False
# Remove addresses from the list that wont resolve.
- mdurls = mcfg.get("metadata_urls", DEF_MD_URLS)
+ mdurls = mcfg.get("metadata_urls", self.metadata_urls)
filtered = [x for x in mdurls if util.is_resolvable_url(x)]
if set(filtered) != set(mdurls):
@@ -117,7 +115,7 @@ class DataSourceEc2(sources.DataSource):
mdurls = filtered
else:
LOG.warn("Empty metadata url list! using default list")
- mdurls = DEF_MD_URLS
+ mdurls = self.metadata_urls
urls = []
url2base = {}
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index ab93c0a2..81abcd47 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -310,12 +310,12 @@ if __name__ == "__main__":
creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
'token_secret': args.tsec, 'consumer_secret': args.csec}
- maaspkg_cfg = "/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg"
- if (args.config is None and args.url is None and
- os.path.exists(maaspkg_cfg) and
- os.access(maaspkg_cfg, os.R_OK)):
- sys.stderr.write("Used config in %s.\n" % maaspkg_cfg)
- args.config = maaspkg_cfg
+ if args.config is None:
+ for fname in ('91_kernel_cmdline_url', '90_dpkg_maas'):
+ fpath = "/etc/cloud/cloud.cfg.d/" + fname + ".cfg"
+ if os.path.exists(fpath) and os.access(fpath, os.R_OK):
+ sys.stderr.write("Used config in %s.\n" % fpath)
+ args.config = fpath
if args.config:
cfg = util.read_conf(args.config)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 635a836c..ba5f3f92 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -30,6 +30,7 @@ import re
import string
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import sources
from cloudinit import util
@@ -120,17 +121,11 @@ class BrokenContextDiskDir(Exception):
class OpenNebulaNetwork(object):
- REG_DEV_MAC = re.compile(
- r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
- re.MULTILINE | re.DOTALL)
-
- def __init__(self, ip, context):
- self.ip = ip
+ def __init__(self, context, system_nics_by_mac=None):
self.context = context
- self.ifaces = self.get_ifaces()
-
- def get_ifaces(self):
- return self.REG_DEV_MAC.findall(self.ip)
+ if system_nics_by_mac is None:
+ system_nics_by_mac = get_physical_nics_by_mac()
+ self.ifaces = system_nics_by_mac
def mac2ip(self, mac):
components = mac.split(':')[2:]
@@ -188,9 +183,7 @@ class OpenNebulaNetwork(object):
conf.append('iface lo inet loopback')
conf.append('')
- for i in self.ifaces:
- dev = i[0]
- mac = i[1]
+ for mac, dev in self.ifaces.items():
ip_components = self.mac2ip(mac)
conf.append('auto ' + dev)
@@ -405,16 +398,19 @@ def read_context_disk_dir(source_dir, asuser=None):
# generate static /etc/network/interfaces
# only if there are any required context variables
# http://opennebula.org/documentation:rel3.8:cong#network_configuration
- for k in context:
- if re.match(r'^ETH\d+_IP$', k):
- (out, _) = util.subp(['ip', 'link'])
- net = OpenNebulaNetwork(out, context)
- results['network-interfaces'] = net.gen_conf()
- break
+ ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP$', k)]
+ if ipaddr_keys:
+ onet = OpenNebulaNetwork(context)
+ results['network-interfaces'] = onet.gen_conf()
return results
+def get_physical_nics_by_mac():
+ devs = net.get_interfaces_by_mac()
+ return dict([(m, n) for m, n in devs.items() if net.is_physical(n)])
+
+
# Legacy: Must be present in case we load an old pkl object
DataSourceOpenNebulaNet = DataSourceOpenNebula
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 689ed4cc..1b3e9b70 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -232,7 +232,7 @@ class WALinuxAgentShim(object):
def _get_value_from_leases_file(fallback_lease_file):
leases = []
content = util.load_file(fallback_lease_file)
- LOG.debug("content is {}".format(content))
+ LOG.debug("content is %s", content)
for line in content.splitlines():
if 'unknown-245' in line:
# Example line from Ubuntu
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
new file mode 100644
index 00000000..b0a721c2
--- /dev/null
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -0,0 +1,218 @@
+# vi: ts=4 expandtab
+#
+# Author: Ben Howard <bh@digitalocean.com>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import random
+
+from cloudinit import log as logging
+from cloudinit import net as cloudnet
+from cloudinit import url_helper
+from cloudinit import util
+
+NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
+
+LOG = logging.getLogger(__name__)
+
+
+def assign_ipv4_link_local(nic=None):
+    """Bring up the NIC using a link-local (ip4LL) address. On
+ DigitalOcean, the link-local domain is per-droplet routed, so there
+    is no risk of collisions. However, to be safer, the ip4LL
+ address is random.
+ """
+
+ if not nic:
+ for cdev in sorted(cloudnet.get_devicelist()):
+ if cloudnet.is_physical(cdev):
+ nic = cdev
+ LOG.debug("assigned nic '%s' for link-local discovery", nic)
+ break
+
+ if not nic:
+ raise RuntimeError("unable to find interfaces to access the"
+ "meta-data server. This droplet is broken.")
+
+ addr = "169.254.{0}.{1}/16".format(random.randint(1, 168),
+ random.randint(0, 255))
+
+ ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
+ ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
+
+ if not util.which('ip'):
+ raise RuntimeError("No 'ip' command available to configure ip4LL "
+ "address")
+
+ try:
+ (result, _err) = util.subp(ip_addr_cmd)
+ LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
+
+ (result, _err) = util.subp(ip_link_cmd)
+ LOG.debug("brought device '%s' up", nic)
+ except Exception:
+ util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
+ " Droplet networking will be broken", addr, nic)
+ raise
+
+ return nic
+
+
+def del_ipv4_link_local(nic=None):
+ """Remove the ip4LL address. While this is not necessary, the ip4LL
+ address is extraneous and confusing to users.
+ """
+ if not nic:
+ LOG.debug("no link_local address interface defined, skipping link "
+ "local address cleanup")
+ return
+
+ LOG.debug("cleaning up ipv4LL address")
+
+ ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
+
+ try:
+ (result, _err) = util.subp(ip_addr_cmd)
+ LOG.debug("removed ip4LL addresses from %s", nic)
+
+ except Exception as e:
+ util.logexc(LOG, "failed to remove ip4LL address from '%s'.", nic, e)
+
+
+def convert_network_configuration(config, dns_servers):
+ """Convert the DigitalOcean Network description into Cloud-init's netconfig
+ format.
+
+ Example JSON:
+ {'public': [
+ {'mac': '04:01:58:27:7f:01',
+ 'ipv4': {'gateway': '45.55.32.1',
+ 'netmask': '255.255.224.0',
+ 'ip_address': '45.55.50.93'},
+ 'anchor_ipv4': {
+ 'gateway': '10.17.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.17.0.9'},
+ 'type': 'public',
+ 'ipv6': {'gateway': '....',
+ 'ip_address': '....',
+ 'cidr': 64}}
+ ],
+ 'private': [
+ {'mac': '04:01:58:27:7f:02',
+ 'ipv4': {'gateway': '10.132.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.132.75.35'},
+ 'type': 'private'}
+ ]
+ }
+ """
+
+ def _get_subnet_part(pcfg, nameservers=None):
+ subpart = {'type': 'static',
+ 'control': 'auto',
+ 'address': pcfg.get('ip_address'),
+ 'gateway': pcfg.get('gateway')}
+
+ if nameservers:
+ subpart['dns_nameservers'] = nameservers
+
+ if ":" in pcfg.get('ip_address'):
+ subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
+ pcfg.get('cidr'))
+ else:
+ subpart['netmask'] = pcfg.get('netmask')
+
+ return subpart
+
+ all_nics = []
+ for k in ('public', 'private'):
+ if k in config:
+ all_nics.extend(config[k])
+
+ macs_to_nics = cloudnet.get_interfaces_by_mac()
+ nic_configs = []
+
+ for nic in all_nics:
+
+ mac_address = nic.get('mac')
+ sysfs_name = macs_to_nics.get(mac_address)
+ nic_type = nic.get('type', 'unknown')
+ # Note: the entry 'public' above contains a list, but
+ # the list will only ever have one nic inside it per digital ocean.
+ # If it ever had more than one nic, then this code would
+ # assign all 'public' the same name.
+ if_name = NIC_MAP.get(nic_type, sysfs_name)
+
+ LOG.debug("mapped %s interface to %s, assigning name of %s",
+ mac_address, sysfs_name, if_name)
+
+ ncfg = {'type': 'physical',
+ 'mac_address': mac_address,
+ 'name': if_name}
+
+ subnets = []
+ for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):
+ raw_subnet = nic.get(netdef, None)
+ if not raw_subnet:
+ continue
+
+ sub_part = _get_subnet_part(raw_subnet)
+ if nic_type == 'public' and 'anchor' not in netdef:
+ # add DNS resolvers to the public interfaces only
+ sub_part = _get_subnet_part(raw_subnet, dns_servers)
+ else:
+                # remove the gateway from any non-public interfaces
+ if 'gateway' in sub_part:
+ del sub_part['gateway']
+
+ subnets.append(sub_part)
+
+ ncfg['subnets'] = subnets
+ nic_configs.append(ncfg)
+ LOG.debug("nic '%s' configuration: %s", if_name, ncfg)
+
+ return {'version': 1, 'config': nic_configs}
+
+
+def read_metadata(url, timeout=2, sec_between=2, retries=30):
+ response = url_helper.readurl(url, timeout=timeout,
+ sec_between=sec_between, retries=retries)
+ if not response.ok():
+ raise RuntimeError("unable to read metadata at %s" % url)
+ return json.loads(response.contents.decode())
+
+
+def read_sysinfo():
+ # DigitalOcean embeds vendor ID and instance/droplet_id in the
+ # SMBIOS information
+
+ # Detect if we are on DigitalOcean and return the Droplet's ID
+ vendor_name = util.read_dmi_data("system-manufacturer")
+ if vendor_name != "DigitalOcean":
+ return (False, None)
+
+ droplet_id = util.read_dmi_data("system-serial-number")
+ if droplet_id:
+ LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s",
+ droplet_id)
+ else:
+ msg = ("system identified via SMBIOS as a DigitalOcean "
+ "Droplet, but did not provide an ID. Please file a "
+ "support ticket at: "
+ "https://cloud.digitalocean.com/support/tickets/new")
+ LOG.critical(msg)
+ raise RuntimeError(msg)
+
+ return (True, droplet_id)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 7c37eb8f..9a3d3cd7 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -154,7 +154,7 @@ def target_path(target, path=None):
def decode_binary(blob, encoding='utf-8'):
# Converts a binary type into a text type using given encoding.
- if isinstance(blob, six.text_type):
+ if isinstance(blob, six.string_types):
return blob
return blob.decode(encoding)
@@ -199,7 +199,7 @@ def fully_decoded_payload(part):
encoding = charset.input_codec
else:
encoding = 'utf-8'
- return cte_payload.decode(encoding, errors='surrogateescape')
+ return cte_payload.decode(encoding, 'surrogateescape')
return cte_payload
@@ -282,9 +282,6 @@ class ProcessExecutionError(IOError):
'reason': self.reason,
}
IOError.__init__(self, message)
- # For backward compatibility with Python 2.
- if not hasattr(self, 'message'):
- self.message = message
class SeLinuxGuard(object):
@@ -1762,7 +1759,7 @@ def delete_dir_contents(dirname):
def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
- logstring=False, decode="replace", target=None):
+ logstring=False, decode="replace", target=None, update_env=None):
# not supported in cloud-init (yet), for now kept in the call signature
# to ease maintaining code shared between cloud-init and curtin
@@ -1773,6 +1770,13 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
rcs = [0]
devnull_fp = None
+
+ if update_env:
+ if env is None:
+ env = os.environ
+ env = env.copy()
+ env.update(update_env)
+
try:
if target_path(target) != "/":
args = ['chroot', target] + list(args)
@@ -1814,7 +1818,7 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
def ldecode(data, m='utf-8'):
if not isinstance(data, bytes):
return data
- return data.decode(m, errors=decode)
+ return data.decode(m, decode)
out = ldecode(out)
err = ldecode(err)
@@ -2337,7 +2341,9 @@ def read_dmi_data(key):
# running dmidecode can be problematic on some arches (LP: #1243287)
uname_arch = os.uname()[4]
- if uname_arch.startswith("arm") or uname_arch == "aarch64":
+ if not (uname_arch == "x86_64" or
+ (uname_arch.startswith("i") and uname_arch[2:] == "86") or
+ uname_arch == 'aarch64'):
LOG.debug("dmidata is not supported on %s", uname_arch)
return None
@@ -2369,3 +2375,15 @@ def get_installed_packages(target=None):
pkgs_inst.add(re.sub(":.*", "", pkg))
return pkgs_inst
+
+
+def system_is_snappy():
+ # channel.ini is configparser loadable.
+ # snappy will move to using /etc/system-image/config.d/*.ini
+ # this is certainly not a perfect test, but good enough for now.
+ content = load_file("/etc/system-image/channel.ini", quiet=True)
+ if 'ubuntu-core' in content.lower():
+ return True
+ if os.path.isdir("/etc/system-image/config.d/"):
+ return True
+ return False
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 7c94ec5c..1b93e7f9 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -30,6 +30,8 @@ cloud_init_modules:
- write-files
- growpart
- resizefs
+ - disk_setup
+ - mounts
- set_hostname
- update_hostname
- update_etc_hosts
@@ -43,15 +45,14 @@ cloud_config_modules:
# Emit the cloud config ready event
# this can be used by upstart jobs for 'start on cloud-config'.
- emit_upstart
- - disk_setup
- - mounts
- - ntp
+ - snap_config
- ssh-import-id
- locale
- set-passwords
- grub-dpkg
- apt-pipelining
- apt-configure
+ - ntp
- timezone
- disable-ec2-metadata
- runcmd
diff --git a/doc/examples/cloud-config-seed-random.txt b/doc/examples/cloud-config-seed-random.txt
index 08f69a9f..142b10cd 100644
--- a/doc/examples/cloud-config-seed-random.txt
+++ b/doc/examples/cloud-config-seed-random.txt
@@ -24,7 +24,7 @@
# Note: command could be ['pollinate',
# '--server=http://local.pollinate.server']
# which would have pollinate populate /dev/urandom from provided server
-seed_random:
+random_seed:
file: '/dev/urandom'
data: 'my random string'
encoding: 'raw'
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index 0e8ed243..9c5202f5 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -30,6 +30,7 @@ users:
gecos: Magic Cloud App Daemon User
inactive: true
system: true
+ - snapuser: joe@joeuser.io
# Valid Values:
# name: The user's login name
@@ -80,6 +81,13 @@ users:
# cloud-init does not parse/check the syntax of the sudo
# directive.
# system: Create the user as a system user. This means no home directory.
+# snapuser: Create a Snappy (Ubuntu-Core) user via the snap create-user
+# command available on Ubuntu systems. If the user has an account
+# on the Ubuntu SSO, specifying the email will allow snap to
+# request a username and any public ssh keys and will import
+# these into the system with username specified by SSO account.
+# If 'username' is not set in SSO, then username will be the
+# shortname before the email domain.
#
# Default user creation:
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 8a391f21..66b3b654 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -48,7 +48,7 @@ version = version.version_string()
release = version
# Set the default Pygments syntax
-highlight_language = 'python'
+highlight_language = 'yaml'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index fe04b1a9..f8ff3c9f 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -23,6 +23,7 @@ Summary
topics/dir_layout
topics/examples
topics/datasources
+ topics/logging
topics/modules
topics/merging
topics/moreinfo
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 0d7d4aca..3a9c808c 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -1,11 +1,11 @@
.. _datasources:
-=========
+===========
Datasources
-=========
-----------
+===========
+----------------------
What is a datasource?
-----------
+----------------------
Datasources are sources of configuration data for cloud-init that typically come
from the user (aka userdata) or come from the stack that created the configuration
@@ -70,9 +70,9 @@ The current interface that a datasource object must provide is the following:
def get_package_mirror_info(self)
----------------------------
+---
EC2
----------------------------
+---
The EC2 datasource is the oldest and most widely used datasource that cloud-init
supports. This datasource interacts with a *magic* ip that is provided to the
@@ -130,61 +130,61 @@ To see which versions are supported from your cloud provider use the following U
...
latest
----------------------------
+------------
Config Drive
----------------------------
+------------
.. include:: ../../sources/configdrive/README.rst
----------------------------
+----------
OpenNebula
----------------------------
+----------
.. include:: ../../sources/opennebula/README.rst
----------------------------
+---------
Alt cloud
----------------------------
+---------
.. include:: ../../sources/altcloud/README.rst
----------------------------
+--------
No cloud
----------------------------
+--------
.. include:: ../../sources/nocloud/README.rst
----------------------------
+----
MAAS
----------------------------
+----
*TODO*
For now see: http://maas.ubuntu.com/
----------------------------
+----------
CloudStack
----------------------------
+----------
.. include:: ../../sources/cloudstack/README.rst
----------------------------
+---
OVF
----------------------------
+---
*TODO*
For now see: https://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/files/head:/doc/sources/ovf/
----------------------------
+---------
OpenStack
----------------------------
+---------
.. include:: ../../sources/openstack/README.rst
----------------------------
+-------------
Fallback/None
----------------------------
+-------------
This is the fallback datasource when no other datasource can be selected. It is
the equivalent of a *empty* datasource in that it provides a empty string as userdata
diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst
index 8815d33d..6dcb22ce 100644
--- a/doc/rtd/topics/dir_layout.rst
+++ b/doc/rtd/topics/dir_layout.rst
@@ -1,6 +1,6 @@
-=========
+================
Directory layout
-=========
+================
Cloudinits's directory structure is somewhat different from a regular application::
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
index 36508bde..2e6cfa1e 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/topics/examples.rst
@@ -1,11 +1,11 @@
.. _yaml_examples:
-=========
+=====================
Cloud config examples
-=========
+=====================
Including users and groups
----------------------------
+--------------------------
.. literalinclude:: ../../examples/cloud-config-user-groups.txt
:language: yaml
@@ -21,21 +21,21 @@ Writing out arbitrary files
Adding a yum repository
----------------------------
+-----------------------
.. literalinclude:: ../../examples/cloud-config-yum-repo.txt
:language: yaml
:linenos:
Configure an instances trusted CA certificates
-------------------------------------------------------
+----------------------------------------------
.. literalinclude:: ../../examples/cloud-config-ca-certs.txt
:language: yaml
:linenos:
Configure an instances resolv.conf
-------------------------------------------------------
+----------------------------------
*Note:* when using a config drive and a RHEL like system resolv.conf
will also be managed 'automatically' due to the available information
@@ -47,28 +47,28 @@ that wish to have different settings use this module.
:linenos:
Install and run `chef`_ recipes
-------------------------------------------------------
+-------------------------------
.. literalinclude:: ../../examples/cloud-config-chef.txt
:language: yaml
:linenos:
Setup and run `puppet`_
-------------------------------------------------------
+-----------------------
.. literalinclude:: ../../examples/cloud-config-puppet.txt
:language: yaml
:linenos:
Add apt repositories
----------------------------
+--------------------
.. literalinclude:: ../../examples/cloud-config-add-apt-repos.txt
:language: yaml
:linenos:
Run commands on first boot
----------------------------
+--------------------------
.. literalinclude:: ../../examples/cloud-config-boot-cmds.txt
:language: yaml
@@ -80,21 +80,21 @@ Run commands on first boot
Alter the completion message
----------------------------
+----------------------------
.. literalinclude:: ../../examples/cloud-config-final-message.txt
:language: yaml
:linenos:
Install arbitrary packages
----------------------------
+--------------------------
.. literalinclude:: ../../examples/cloud-config-install-packages.txt
:language: yaml
:linenos:
Run apt or yum upgrade
----------------------------
+----------------------
.. literalinclude:: ../../examples/cloud-config-update-packages.txt
:language: yaml
@@ -108,26 +108,46 @@ Adjust mount points mounted
:linenos:
Call a url when finished
----------------------------
+------------------------
.. literalinclude:: ../../examples/cloud-config-phone-home.txt
:language: yaml
:linenos:
Reboot/poweroff when finished
----------------------------
+-----------------------------
.. literalinclude:: ../../examples/cloud-config-power-state.txt
:language: yaml
:linenos:
Configure instances ssh-keys
----------------------------
+----------------------------
.. literalinclude:: ../../examples/cloud-config-ssh-keys.txt
:language: yaml
:linenos:
+Additional apt configuration
+----------------------------
+
+.. literalinclude:: ../../examples/cloud-config-apt.txt
+ :language: yaml
+ :linenos:
+
+Disk setup
+----------
+
+.. literalinclude:: ../../examples/cloud-config-disk-setup.txt
+ :language: yaml
+ :linenos:
+
+Register RedHat Subscription
+----------------------------
+
+.. literalinclude:: ../../examples/cloud-config-rh_subscription.txt
+ :language: yaml
+ :linenos:
.. _chef: http://www.opscode.com/chef/
.. _puppet: http://puppetlabs.com/
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index eba9533f..1dd92309 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -1,18 +1,18 @@
-=========
+=======
Formats
-=========
+=======
User data that will be acted upon by cloud-init must be in one of the following types.
Gzip Compressed Content
-------------------------
+-----------------------
Content found to be gzip compressed will be uncompressed.
The uncompressed data will then be used as if it were not compressed.
This is typically is useful because user-data is limited to ~16384 [#]_ bytes.
Mime Multi Part Archive
-------------------------
+-----------------------
This list of rules is applied to each part of this multi-part file.
Using a mime-multi part file, the user can specify more than one type of data.
@@ -31,7 +31,7 @@ Supported content-types:
- text/cloud-boothook
Helper script to generate mime messages
-~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. code-block:: python
@@ -59,7 +59,7 @@ Helper script to generate mime messages
User-Data Script
-------------------------
+----------------
Typically used by those who just want to execute a shell script.
diff --git a/doc/rtd/topics/logging.rst b/doc/rtd/topics/logging.rst
new file mode 100644
index 00000000..b010aa96
--- /dev/null
+++ b/doc/rtd/topics/logging.rst
@@ -0,0 +1,175 @@
+=======
+Logging
+=======
+Cloud-init supports both local and remote logging configurable through python's
+built-in logging configuration and through the cloud-init rsyslog module.
+
+Command Output
+--------------
+Cloud-init can redirect its stdout and stderr based on config given under the
+``output`` config key. The output of any commands run by cloud-init and any
+user or vendor scripts provided will also be included here. The ``output``
+key accepts a dictionary for configuration. Output files may be specified
+individually for each stage (``init``, ``config``, and ``final``), or a single
+key ``all`` may be used to specify output for all stages.
+
+The output for each stage may be specified as a dictionary of ``output`` and
+``error`` keys, for stdout and stderr respectively, as a tuple with stdout
+first and stderr second, or as a single string to use for both. The strings
+passed to all of these keys are handled by the system shell, so any form of
+redirection that can be used in bash is valid, including piping cloud-init's
+output to ``tee``, or ``logger``. If only a filename is provided, cloud-init
+will append its output to the file as though ``>>`` was specified.
+
+By default, cloud-init loads its output configuration from
+``/etc/cloud/cloud.cfg.d/05_logging.cfg``. The default config directs both
+stdout and stderr from all cloud-init stages to
+``/var/log/cloud-init-output.log``. The default config is given as ::
+
+ output: { all: "| tee -a /var/log/cloud-init-output.log" }
+
+For a more complex example, the following configuration would output the init
+stage to ``/var/log/cloud-init.out`` and ``/var/log/cloud-init.err``, for
+stdout and stderr respectively, replacing anything that was previously there.
+For the config stage, it would pipe both stdout and stderr through
+``tee -a /var/log/cloud-config.log``. For the final stage it would append the
+output of stdout and stderr to ``/var/log/cloud-final.out`` and
+``/var/log/cloud-final.err`` respectively. ::
+
+ output:
+ init:
+ output: "> /var/log/cloud-init.out"
+ error: "> /var/log/cloud-init.err"
+ config: "tee -a /var/log/cloud-config.log"
+ final:
+ - ">> /var/log/cloud-final.out"
+ - "/var/log/cloud-final.err"
+
+Python Logging
+--------------
+Cloud-init uses the python logging module, and can accept config for this
+module using the standard python fileConfig format. Cloud-init looks for config
+for the logging module under the ``logcfg`` key.
+
+.. note::
+ the logging configuration is not yaml, it is python ``fileConfig`` format,
+ and is passed through directly to the python logging module. please use the
+ correct syntax for a multi-line string in yaml.
+
+By default, cloud-init uses the logging configuration provided in
+``/etc/cloud/cloud.cfg.d/05_logging.cfg``. The default python logging
+configuration writes all cloud-init events with a priority of ``WARNING`` or
+higher to console, and writes all events with a level of ``DEBUG`` or higher
+to ``/var/log/cloud-init.log`` and via syslog.
+
+Python's fileConfig format consists of sections with headings in the format
+``[title]`` and key value pairs in each section. Configuration for python
+logging must contain the sections ``[loggers]``, ``[handlers]``, and
+``[formatters]``, which name the entities of their respective types that will
+be defined. The section name for each defined logger, handler and formatter
+will start with its type, followed by an underscore (``_``) and the name of the
+entity. For example, if a logger was specified with the name ``log01``, config
+for the logger would be in the section ``[logger_log01]``.
+
+Logger config entries contain basic logging set up. They may specify a list of
+handlers to send logging events to as well as the lowest priority level of
+events to handle. A logger named ``root`` must be specified and its
+configuration (under ``[logger_root]``) must contain a level and a list of
+handlers. A level entry can be any of the following: ``DEBUG``, ``INFO``,
+``WARNING``, ``ERROR``, ``CRITICAL``, or ``NOTSET``. For the ``root`` logger
+the ``NOTSET`` option will allow all logging events to be recorded.
+
+Each configured handler must specify a class under the python's ``logging``
+package namespace. A handler may specify a message formatter to use, a priority
+level, and arguments for the handler class. Common handlers are
+``StreamHandler``, which handles stream redirects (i.e. logging to stderr),
+and ``FileHandler`` which outputs to a log file. The logging module also
+supports logging over net sockets, over http, via smtp, and additional
+complex configurations. For full details about the handlers available for
+python logging, please see the documentation for `python logging handlers`_.
+
+Log messages are formatted using the ``logging.Formatter`` class, which is
+configured using ``formatter`` config entities. A default format of
+``%(message)s`` is given if no formatter configs are specified. Formatter
+config entities accept a format string which supports variable replacements.
+These may also accept a ``datefmt`` string which may be used to configure the
+timestamp used in the log messages. The format variables ``%(asctime)s``,
+``%(levelname)s`` and ``%(message)s`` are commonly used and represent the
+timestamp, the priority level of the event and the event message. For
+additional information on logging formatters see `python logging formatters`_.
+
+.. note::
+ by default the format string used in the logging formatter are in python's
+ old style ``%s`` form. the ``str.format()`` and ``string.Template`` styles
+ can also be used by using ``{`` or ``$`` in place of ``%`` by setting the
+ ``style`` parameter in formatter config.
+
+A simple, but functional python logging configuration for cloud-init is below.
+It will log all messages of priority ``DEBUG`` or higher to both stderr and
+``/tmp/my.log`` using a ``StreamHandler`` and a ``FileHandler``, using
+the default format string ``%(message)s``::
+
+ logcfg: |
+ [loggers]
+ keys=root,cloudinit
+ [handlers]
+ keys=ch,cf
+ [formatters]
+ keys=
+ [logger_root]
+ level=DEBUG
+ handlers=
+ [logger_cloudinit]
+ level=DEBUG
+ qualname=cloudinit
+ handlers=ch,cf
+ [handler_ch]
+ class=StreamHandler
+ level=DEBUG
+ args=(sys.stderr,)
+ [handler_cf]
+ class=FileHandler
+ level=DEBUG
+ args=('/tmp/my.log',)
+
+For additional information about configuring python's logging module, please
+see the documentation for `python logging config`_.
+
+Rsyslog Module
+--------------
+Cloud-init's ``cc_rsyslog`` module allows for fully customizable rsyslog
+configuration under the ``rsyslog`` config key. The simplest way to
+use the rsyslog module is by specifying remote servers under the ``remotes``
+key in ``rsyslog`` config. The ``remotes`` key takes a dictionary where each
+key represents the name of an rsyslog server and each value is the
+configuration for that server. The format for server config is:
+
+ - optional filter for log messages (defaults to ``*.*``)
+ - optional leading ``@`` or ``@@``, indicating udp and tcp respectively
+ (defaults to ``@``, for udp)
+ - ipv4 or ipv6 hostname or address. ipv6 addresses must be in ``[::1]``
+ format, (e.g. ``@[fd00::1]:514``)
+ - optional port number (defaults to ``514``)
+
+For example, to send logging to an rsyslog server named ``log_serv`` with
+address ``10.0.4.1``, using port number ``514``, over udp, with all log
+messages enabled one could use either of the following.
+
+With all options specified::
+
+ rsyslog:
+ remotes:
+ log_serv: "*.* @10.0.4.1:514"
+
+With defaults used::
+
+ rsyslog:
+ remotes:
+ log_serv: "10.0.4.1"
+
+
+For more information on rsyslog configuration, see :ref:`cc_rsyslog`.
+
+.. _python logging config: https://docs.python.org/3/library/logging.config.html#configuration-file-format
+.. _python logging handlers: https://docs.python.org/3/library/logging.handlers.html
+.. _python logging formatters: https://docs.python.org/3/library/logging.html#formatter-objects
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index 4202338b..57892f2d 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -1,342 +1,57 @@
=======
Modules
=======
-
-Apt Configure
--------------
-
-**Internal name:** ``cc_apt_configure``
-
.. automodule:: cloudinit.config.cc_apt_configure
-
-Apt Pipelining
---------------
-
-**Internal name:** ``cc_apt_pipelining``
-
.. automodule:: cloudinit.config.cc_apt_pipelining
-
-Bootcmd
--------
-
-**Internal name:** ``cc_bootcmd``
-
.. automodule:: cloudinit.config.cc_bootcmd
-
-Byobu
------
-
-**Internal name:** ``cc_byobu``
-
.. automodule:: cloudinit.config.cc_byobu
-
-Ca Certs
---------
-
-**Internal name:** ``cc_ca_certs``
-
.. automodule:: cloudinit.config.cc_ca_certs
-
-Chef
-----
-
-**Internal name:** ``cc_chef``
-
.. automodule:: cloudinit.config.cc_chef
- :members:
-
-Debug
------
-
-**Internal name:** ``cc_debug``
-
.. automodule:: cloudinit.config.cc_debug
- :members:
-
-Disable Ec2 Metadata
---------------------
-
-**Internal name:** ``cc_disable_ec2_metadata``
-
.. automodule:: cloudinit.config.cc_disable_ec2_metadata
-
-Disk Setup
-----------
-
-**Internal name:** ``cc_disk_setup``
-
.. automodule:: cloudinit.config.cc_disk_setup
-
-Emit Upstart
-------------
-
-**Internal name:** ``cc_emit_upstart``
-
.. automodule:: cloudinit.config.cc_emit_upstart
-
-Final Message
--------------
-
-**Internal name:** ``cc_final_message``
-
+.. automodule:: cloudinit.config.cc_fan
.. automodule:: cloudinit.config.cc_final_message
-
-Foo
----
-
-**Internal name:** ``cc_foo``
-
.. automodule:: cloudinit.config.cc_foo
-
-Growpart
---------
-
-**Internal name:** ``cc_growpart``
-
.. automodule:: cloudinit.config.cc_growpart
-
-Grub Dpkg
----------
-
-**Internal name:** ``cc_grub_dpkg``
-
.. automodule:: cloudinit.config.cc_grub_dpkg
-
-Keys To Console
----------------
-
-**Internal name:** ``cc_keys_to_console``
-
.. automodule:: cloudinit.config.cc_keys_to_console
-
-Landscape
----------
-
-**Internal name:** ``cc_landscape``
-
.. automodule:: cloudinit.config.cc_landscape
-
-Locale
-------
-
-**Internal name:** ``cc_locale``
-
.. automodule:: cloudinit.config.cc_locale
-
-Mcollective
------------
-
-**Internal name:** ``cc_mcollective``
-
+.. automodule:: cloudinit.config.cc_lxd
.. automodule:: cloudinit.config.cc_mcollective
-
-Migrator
---------
-
-**Internal name:** ``cc_migrator``
-
.. automodule:: cloudinit.config.cc_migrator
-
-Mounts
-------
-
-**Internal name:** ``cc_mounts``
-
.. automodule:: cloudinit.config.cc_mounts
-
-Package Update Upgrade Install
-------------------------------
-
-**Internal name:** ``cc_package_update_upgrade_install``
-
+.. automodule:: cloudinit.config.cc_ntp
.. automodule:: cloudinit.config.cc_package_update_upgrade_install
-
-Phone Home
-----------
-
-**Internal name:** ``cc_phone_home``
-
.. automodule:: cloudinit.config.cc_phone_home
-
-Power State Change
-------------------
-
-**Internal name:** ``cc_power_state_change``
-
.. automodule:: cloudinit.config.cc_power_state_change
-
-Puppet
-------
-
-**Internal name:** ``cc_puppet``
-
.. automodule:: cloudinit.config.cc_puppet
-
-Resizefs
---------
-
-**Internal name:** ``cc_resizefs``
-
.. automodule:: cloudinit.config.cc_resizefs
-
-Resolv Conf
------------
-
-**Internal name:** ``cc_resolv_conf``
-
.. automodule:: cloudinit.config.cc_resolv_conf
-
-Rightscale Userdata
--------------------
-
-**Internal name:** ``cc_rightscale_userdata``
-
+.. automodule:: cloudinit.config.cc_rh_subscription
.. automodule:: cloudinit.config.cc_rightscale_userdata
-
-Rsyslog
--------
-
-**Internal name:** ``cc_rsyslog``
-
.. automodule:: cloudinit.config.cc_rsyslog
-
-Runcmd
-------
-
-**Internal name:** ``cc_runcmd``
-
.. automodule:: cloudinit.config.cc_runcmd
-
-Salt Minion
------------
-
-**Internal name:** ``cc_salt_minion``
-
.. automodule:: cloudinit.config.cc_salt_minion
-
-Scripts Per Boot
-----------------
-
-**Internal name:** ``cc_scripts_per_boot``
-
.. automodule:: cloudinit.config.cc_scripts_per_boot
-
-Scripts Per Instance
---------------------
-
-**Internal name:** ``cc_scripts_per_instance``
-
.. automodule:: cloudinit.config.cc_scripts_per_instance
-
-Scripts Per Once
-----------------
-
-**Internal name:** ``cc_scripts_per_once``
-
.. automodule:: cloudinit.config.cc_scripts_per_once
-
-Scripts User
-------------
-
-**Internal name:** ``cc_scripts_user``
-
.. automodule:: cloudinit.config.cc_scripts_user
-
-Scripts Vendor
---------------
-
-**Internal name:** ``cc_scripts_vendor``
-
.. automodule:: cloudinit.config.cc_scripts_vendor
-
-Seed Random
------------
-
-**Internal name:** ``cc_seed_random``
-
.. automodule:: cloudinit.config.cc_seed_random
-
-Set Hostname
-------------
-
-**Internal name:** ``cc_set_hostname``
-
.. automodule:: cloudinit.config.cc_set_hostname
-
-Set Passwords
--------------
-
-**Internal name:** ``cc_set_passwords``
-
.. automodule:: cloudinit.config.cc_set_passwords
-
-Ssh
----
-
-**Internal name:** ``cc_ssh``
-
+.. automodule:: cloudinit.config.cc_snappy
+.. automodule:: cloudinit.config.cc_spacewalk
.. automodule:: cloudinit.config.cc_ssh
-
-Ssh Authkey Fingerprints
-------------------------
-
-**Internal name:** ``cc_ssh_authkey_fingerprints``
-
.. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints
-
-Ssh Import Id
--------------
-
-**Internal name:** ``cc_ssh_import_id``
-
.. automodule:: cloudinit.config.cc_ssh_import_id
-
-Timezone
---------
-
-**Internal name:** ``cc_timezone``
-
.. automodule:: cloudinit.config.cc_timezone
-
-Ubuntu Init Switch
-------------------
-
-**Internal name:** ``cc_ubuntu_init_switch``
-
.. automodule:: cloudinit.config.cc_ubuntu_init_switch
- :members:
-
-Update Etc Hosts
-----------------
-
-**Internal name:** ``cc_update_etc_hosts``
-
.. automodule:: cloudinit.config.cc_update_etc_hosts
-
-Update Hostname
----------------
-
-**Internal name:** ``cc_update_hostname``
-
.. automodule:: cloudinit.config.cc_update_hostname
-
-Users Groups
-------------
-
-**Internal name:** ``cc_users_groups``
-
.. automodule:: cloudinit.config.cc_users_groups
-
-Write Files
------------
-
-**Internal name:** ``cc_write_files``
-
.. automodule:: cloudinit.config.cc_write_files
-
-Yum Add Repo
-------------
-
-**Internal name:** ``cc_yum_add_repo``
-
.. automodule:: cloudinit.config.cc_yum_add_repo
diff --git a/doc/rtd/topics/moreinfo.rst b/doc/rtd/topics/moreinfo.rst
index 19e96af0..b34cb7dc 100644
--- a/doc/rtd/topics/moreinfo.rst
+++ b/doc/rtd/topics/moreinfo.rst
@@ -1,9 +1,9 @@
-=========
+================
More information
-=========
+================
Useful external references
--------------------------
+--------------------------
- `The beauty of cloudinit`_
- `Introduction to cloud-init`_ (video)
diff --git a/doc/sources/altcloud/README.rst b/doc/sources/altcloud/README.rst
index b5d72ebb..0a54fda1 100644
--- a/doc/sources/altcloud/README.rst
+++ b/doc/sources/altcloud/README.rst
@@ -14,7 +14,7 @@ The format of the Custom Properties entry must be:
For example to pass a simple bash script:
-::
+.. sourcecode:: sh
% cat simple_script.bash
#!/bin/bash
@@ -55,13 +55,13 @@ For example, to pass the same ``simple_script.bash`` to vSphere:
Create the ISO
-----------------
-::
+.. sourcecode:: sh
% mkdir my-iso
NOTE: The file name on the ISO must be: ``user-data.txt``
-::
+.. sourcecode:: sh
% cp simple_scirpt.bash my-iso/user-data.txt
% genisoimage -o user-data.iso -r my-iso
@@ -69,7 +69,7 @@ NOTE: The file name on the ISO must be: ``user-data.txt``
Verify the ISO
-----------------
-::
+.. sourcecode:: sh
% sudo mkdir /media/vsphere_iso
% sudo mount -o loop JoeV_CI_02.iso /media/vsphere_iso
diff --git a/doc/sources/configdrive/README.rst b/doc/sources/configdrive/README.rst
index 48ff579d..8c40735f 100644
--- a/doc/sources/configdrive/README.rst
+++ b/doc/sources/configdrive/README.rst
@@ -46,7 +46,7 @@ The following criteria are required to as a config drive:
formatted.
Version 2
-~~~~~~~~~~~
+~~~~~~~~~
The following criteria are required to as a config drive:
@@ -70,7 +70,7 @@ The following criteria are required to as a config drive:
- meta-data.json (not mandatory)
Keys and values
-~~~~~~~~~~~
+~~~~~~~~~~~~~~~
Cloud-init's behavior can be modified by keys found in the meta.js (version 1 only) file in the following ways.
diff --git a/packages/bddeb b/packages/bddeb
index abb7b607..79ac9768 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -29,6 +29,7 @@ if "avoid-pep8-E402-import-not-top-of-file":
# file pypi package name to a debian/ubuntu package name.
STD_NAMED_PACKAGES = [
'configobj',
+ 'coverage',
'jinja2',
'jsonpatch',
'oauthlib',
diff --git a/systemd/cloud-init-local.service b/systemd/cloud-init-local.service
index b19eeaee..55834ba4 100644
--- a/systemd/cloud-init-local.service
+++ b/systemd/cloud-init-local.service
@@ -4,9 +4,11 @@ DefaultDependencies=no
Wants=local-fs.target
Wants=network-pre.target
After=local-fs.target
-Conflicts=shutdown.target
+Before=basic.target
+Before=NetworkManager.service
Before=network-pre.target
Before=shutdown.target
+Conflicts=shutdown.target
[Service]
Type=oneshot
diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service
index 6fb655e6..5c71b213 100644
--- a/systemd/cloud-init.service
+++ b/systemd/cloud-init.service
@@ -1,9 +1,20 @@
[Unit]
Description=Initial cloud-init job (metadata service crawler)
-After=cloud-init-local.service networking.service
-Before=network-online.target sshd.service sshd-keygen.service systemd-user-sessions.service
+DefaultDependencies=no
+Wants=cloud-init-local.service
+Wants=local-fs.target
+Wants=sshd-keygen.service
+Wants=sshd.service
+After=cloud-init-local.service
+After=networking.service
Requires=networking.service
-Wants=local-fs.target cloud-init-local.service sshd.service sshd-keygen.service
+Before=basic.target
+Before=dbus.socket
+Before=network-online.target
+Before=sshd-keygen.service
+Before=sshd.service
+Before=systemd-user-sessions.service
+Conflicts=shutdown.target
[Service]
Type=oneshot
diff --git a/sysvinit/gentoo/cloud-init b/sysvinit/gentoo/cloud-init
index 5afc0f2e..531a715d 100644
--- a/sysvinit/gentoo/cloud-init
+++ b/sysvinit/gentoo/cloud-init
@@ -2,6 +2,7 @@
# add depends for network, dns, fs etc
depend() {
after cloud-init-local
+ after net
before cloud-config
provide cloud-init
}
diff --git a/sysvinit/gentoo/cloud-init-local b/sysvinit/gentoo/cloud-init-local
index 9bd0b569..0f8cf65c 100644
--- a/sysvinit/gentoo/cloud-init-local
+++ b/sysvinit/gentoo/cloud-init-local
@@ -2,7 +2,7 @@
depend() {
after localmount
- after netmount
+ before net
before cloud-init
provide cloud-init-local
}
diff --git a/test-requirements.txt b/test-requirements.txt
index 6bf38940..0e7fc8fb 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,6 +3,7 @@ httpretty>=0.7.1
mock
nose
unittest2
+coverage
# Only needed if you want to know the test times
# nose-timer
diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py
index e69de29b..1b34b5af 100644
--- a/tests/unittests/__init__.py
+++ b/tests/unittests/__init__.py
@@ -0,0 +1,9 @@
+try:
+ # For test cases, avoid the following UserWarning to stderr:
+ # You don't have the C version of NameMapper installed ...
+ from Cheetah import NameMapper as _nm
+ _nm.C_VERSION = True
+except ImportError:
+ pass
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 1cdc05a1..a2355a79 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -205,6 +205,14 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
self.patched_funcs.enter_context(
mock.patch.object(sys, 'stderr', stderr))
+ def reRoot(self, root=None):
+ if root is None:
+ root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, root)
+ self.patchUtils(root)
+ self.patchOS(root)
+ return root
+
def import_httpretty():
"""Import HTTPretty and monkey patch Python 3.4 issue.
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 13db8a4c..55d9b93f 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -3,8 +3,6 @@
import gzip
import logging
import os
-import shutil
-import tempfile
try:
from unittest import mock
@@ -98,10 +96,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
ci = stages.Init()
ci.datasource = FakeDataSource(blob)
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.patchUtils(new_root)
- self.patchOS(new_root)
+ self.reRoot()
ci.fetch()
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
@@ -127,9 +122,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
{ "op": "add", "path": "/foo", "value": "quxC" }
]
'''
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
+ self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
@@ -167,9 +160,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
{ "op": "add", "path": "/foo", "value": "quxC" }
]
'''
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
+ self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
@@ -212,12 +203,9 @@ c: d
message.attach(message_cc)
message.attach(message_jp)
+ self.reRoot()
ci = stages.Init()
ci.datasource = FakeDataSource(str(message))
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.patchUtils(new_root)
- self.patchOS(new_root)
ci.fetch()
ci.consume_data()
cc_contents = util.load_file(ci.paths.get_ipath("cloud_config"))
@@ -245,9 +233,7 @@ name: user
run:
- z
'''
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
+ self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
@@ -281,9 +267,7 @@ vendor_data:
enabled: True
prefix: /bin/true
'''
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self._patchIn(new_root)
+ new_root = self.reRoot()
initer = stages.Init()
initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob)
initer.read_cfg()
@@ -342,10 +326,7 @@ p: 1
paths = c_helpers.Paths({}, ds=FakeDataSource(''))
cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths)
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.patchUtils(new_root)
- self.patchOS(new_root)
+ self.reRoot()
cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None,
None)
for i, m in enumerate(messages):
@@ -365,6 +346,7 @@ p: 1
def test_unhandled_type_warning(self):
"""Raw text without magic is ignored but shows warning."""
+ self.reRoot()
ci = stages.Init()
data = "arbitrary text\n"
ci.datasource = FakeDataSource(data)
@@ -402,10 +384,7 @@ c: 4
message.attach(gzip_part(base_content2))
ci = stages.Init()
ci.datasource = FakeDataSource(str(message))
- new_root = tempfile.mkdtemp()
- self.addCleanup(shutil.rmtree, new_root)
- self.patchUtils(new_root)
- self.patchOS(new_root)
+ self.reRoot()
ci.fetch()
ci.consume_data()
contents = util.load_file(ci.paths.get_ipath("cloud_config"))
@@ -418,6 +397,7 @@ c: 4
def test_mime_text_plain(self):
"""Mime message of type text/plain is ignored but shows warning."""
+ self.reRoot()
ci = stages.Init()
message = MIMEBase("text", "plain")
message.set_payload("Just text")
@@ -435,6 +415,7 @@ c: 4
def test_shellscript(self):
"""Raw text starting #!/bin/sh is treated as script."""
+ self.reRoot()
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
ci.datasource = FakeDataSource(script)
@@ -453,6 +434,7 @@ c: 4
def test_mime_text_x_shellscript(self):
"""Mime message of type text/x-shellscript is treated as script."""
+ self.reRoot()
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
message = MIMEBase("text", "x-shellscript")
@@ -473,6 +455,7 @@ c: 4
def test_mime_text_plain_shell(self):
"""Mime type text/plain starting #!/bin/sh is treated as script."""
+ self.reRoot()
ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
message = MIMEBase("text", "plain")
@@ -493,6 +476,7 @@ c: 4
def test_mime_application_octet_stream(self):
"""Mime type application/octet-stream is ignored but shows warning."""
+ self.reRoot()
ci = stages.Init()
message = MIMEBase("application", "octet-stream")
message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc')
@@ -516,6 +500,7 @@ c: 4
{'content': non_decodable}]
message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode()
+ self.reRoot()
ci = stages.Init()
ci.datasource = FakeDataSource(message)
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
new file mode 100644
index 00000000..6f1de072
--- /dev/null
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -0,0 +1,148 @@
+import functools
+import httpretty
+import os
+
+from .. import helpers as test_helpers
+from cloudinit import helpers
+from cloudinit.sources import DataSourceAliYun as ay
+
+DEFAULT_METADATA = {
+ 'instance-id': 'aliyun-test-vm-00',
+ 'eipv4': '10.0.0.1',
+ 'hostname': 'test-hostname',
+ 'image-id': 'm-test',
+ 'launch-index': '0',
+ 'mac': '00:16:3e:00:00:00',
+ 'network-type': 'vpc',
+ 'private-ipv4': '192.168.0.1',
+ 'serial-number': 'test-string',
+ 'vpc-cidr-block': '192.168.0.0/16',
+ 'vpc-id': 'test-vpc',
+ 'vswitch-id': 'test-vpc',
+ 'vswitch-cidr-block': '192.168.0.0/16',
+ 'zone-id': 'test-zone-1',
+ 'ntp-conf': {'ntp_servers': [
+ 'ntp1.aliyun.com',
+ 'ntp2.aliyun.com',
+ 'ntp3.aliyun.com']},
+ 'source-address': ['http://mirrors.aliyun.com',
+ 'http://mirrors.aliyuncs.com'],
+ 'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'},
+ 'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}}
+}
+
+DEFAULT_USERDATA = """\
+#cloud-config
+
+hostname: localhost"""
+
+
+def register_mock_metaserver(base_url, data):
+ def register_helper(register, base_url, body):
+ if isinstance(body, str):
+ register(base_url, body)
+ elif isinstance(body, list):
+ register(base_url.rstrip('/'), '\n'.join(body) + '\n')
+ elif isinstance(body, dict):
+ vals = []
+ for k, v in body.items():
+ if isinstance(v, (str, list)):
+ suffix = k.rstrip('/')
+ else:
+ suffix = k.rstrip('/') + '/'
+ vals.append(suffix)
+ url = base_url.rstrip('/') + '/' + suffix
+ register_helper(register, url, v)
+ register(base_url, '\n'.join(vals) + '\n')
+
+ register = functools.partial(httpretty.register_uri, httpretty.GET)
+ register_helper(register, base_url, data)
+
+
+class TestAliYunDatasource(test_helpers.HttprettyTestCase):
+ def setUp(self):
+ super(TestAliYunDatasource, self).setUp()
+ cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}}
+ distro = {}
+ paths = helpers.Paths({})
+ self.ds = ay.DataSourceAliYun(cfg, distro, paths)
+ self.metadata_address = self.ds.metadata_urls[0]
+ self.api_ver = self.ds.api_ver
+
+ @property
+ def default_metadata(self):
+ return DEFAULT_METADATA
+
+ @property
+ def default_userdata(self):
+ return DEFAULT_USERDATA
+
+ @property
+ def metadata_url(self):
+ return os.path.join(self.metadata_address,
+ self.api_ver, 'meta-data') + '/'
+
+ @property
+ def userdata_url(self):
+ return os.path.join(self.metadata_address,
+ self.api_ver, 'user-data')
+
+ def regist_default_server(self):
+ register_mock_metaserver(self.metadata_url, self.default_metadata)
+ register_mock_metaserver(self.userdata_url, self.default_userdata)
+
+ def _test_get_data(self):
+ self.assertEqual(self.ds.metadata, self.default_metadata)
+ self.assertEqual(self.ds.userdata_raw,
+ self.default_userdata.encode('utf8'))
+
+ def _test_get_sshkey(self):
+ pub_keys = [v['openssh-key'] for (_, v) in
+ self.default_metadata['public-keys'].items()]
+ self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys)
+
+ def _test_get_iid(self):
+ self.assertEqual(self.default_metadata['instance-id'],
+ self.ds.get_instance_id())
+
+ def _test_host_name(self):
+ self.assertEqual(self.default_metadata['hostname'],
+ self.ds.get_hostname())
+
+ @httpretty.activate
+ def test_with_mock_server(self):
+ self.regist_default_server()
+ self.ds.get_data()
+ self._test_get_data()
+ self._test_get_sshkey()
+ self._test_get_iid()
+ self._test_host_name()
+
+ def test_parse_public_keys(self):
+ public_keys = {}
+ self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+ public_keys = {'key-pair-0': 'ssh-key-0'}
+ self.assertEqual(ay.parse_public_keys(public_keys),
+ [public_keys['key-pair-0']])
+
+ public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'}
+ self.assertEqual(set(ay.parse_public_keys(public_keys)),
+ set([public_keys['key-pair-0'],
+ public_keys['key-pair-1']]))
+
+ public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']}
+ self.assertEqual(ay.parse_public_keys(public_keys),
+ public_keys['key-pair-0'])
+
+ public_keys = {'key-pair-0': {'openssh-key': []}}
+ self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+ public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}}
+ self.assertEqual(ay.parse_public_keys(public_keys),
+ [public_keys['key-pair-0']['openssh-key']])
+
+ public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0',
+ 'ssh-key-1']}}
+ self.assertEqual(ay.parse_public_keys(public_keys),
+ public_keys['key-pair-0']['openssh-key'])
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
index f5d2ef35..7bde0820 100644
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ b/tests/unittests/test_datasource/test_digitalocean.py
@@ -20,25 +20,123 @@ import json
from cloudinit import helpers
from cloudinit import settings
from cloudinit.sources import DataSourceDigitalOcean
+from cloudinit.sources.helpers import digitalocean
-from .. import helpers as test_helpers
-from ..helpers import HttprettyTestCase
-
-httpretty = test_helpers.import_httpretty()
+from ..helpers import mock, TestCase
DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
"ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"]
DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
-DO_META = {
- 'user_data': 'user_data_here',
- 'vendor_data': 'vendor_data_here',
- 'public_keys': DO_SINGLE_KEY,
- 'region': 'nyc3',
- 'id': '2000000',
- 'hostname': 'cloudinit-test',
+# the following JSON was taken from a droplet (that's why it's a string)
+DO_META = json.loads("""
+{
+ "droplet_id": "22532410",
+ "hostname": "utl-96268",
+ "vendor_data": "vendordata goes here",
+ "user_data": "userdata goes here",
+ "public_keys": "",
+ "auth_key": "authorization_key",
+ "region": "nyc3",
+ "interfaces": {
+ "private": [
+ {
+ "ipv4": {
+ "ip_address": "10.132.6.205",
+ "netmask": "255.255.0.0",
+ "gateway": "10.132.0.1"
+ },
+ "mac": "04:01:57:d1:9e:02",
+ "type": "private"
+ }
+ ],
+ "public": [
+ {
+ "ipv4": {
+ "ip_address": "192.0.0.20",
+ "netmask": "255.255.255.0",
+ "gateway": "104.236.0.1"
+ },
+ "ipv6": {
+ "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000",
+ "cidr": 64,
+ "gateway": "2604:A880:0800:0000:0000:0000:0000:0001"
+ },
+ "anchor_ipv4": {
+ "ip_address": "10.0.0.5",
+ "netmask": "255.255.0.0",
+ "gateway": "10.0.0.1"
+ },
+ "mac": "04:01:57:d1:9e:01",
+ "type": "public"
+ }
+ ]
+ },
+ "floating_ip": {
+ "ipv4": {
+ "active": false
+ }
+ },
+ "dns": {
+ "nameservers": [
+ "2001:4860:4860::8844",
+ "2001:4860:4860::8888",
+ "8.8.8.8"
+ ]
+ }
+}
+""")
+
+# This has no private interface
+DO_META_2 = {
+ "droplet_id": 27223699,
+ "hostname": "smtest1",
+ "vendor_data": "\n".join([
+ ('"Content-Type: multipart/mixed; '
+ 'boundary=\"===============8645434374073493512==\"'),
+ 'MIME-Version: 1.0',
+ '',
+ '--===============8645434374073493512==',
+ 'MIME-Version: 1.0'
+ 'Content-Type: text/cloud-config; charset="us-ascii"'
+ 'Content-Transfer-Encoding: 7bit'
+ 'Content-Disposition: attachment; filename="cloud-config"'
+ '',
+ '#cloud-config',
+ 'disable_root: false',
+ 'manage_etc_hosts: true',
+ '',
+ '',
+ '--===============8645434374073493512=='
+ ]),
+ "public_keys": [
+ "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"
+ ],
+ "auth_key": "88888888888888888888888888888888",
+ "region": "nyc3",
+ "interfaces": {
+ "public": [{
+ "ipv4": {
+ "ip_address": "45.55.249.133",
+ "netmask": "255.255.192.0",
+ "gateway": "45.55.192.1"
+ },
+ "anchor_ipv4": {
+ "ip_address": "10.17.0.5",
+ "netmask": "255.255.0.0",
+ "gateway": "10.17.0.1"
+ },
+ "mac": "ae:cc:08:7c:88:00",
+ "type": "public"
+ }]
+ },
+ "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
+ "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
+ "tags": None,
}
+DO_META['public_keys'] = DO_SINGLE_KEY
+
MD_URL = 'http://169.254.169.254/metadata/v1.json'
@@ -46,69 +144,189 @@ def _mock_dmi():
return (True, DO_META.get('id'))
-def _request_callback(method, uri, headers):
- return (200, headers, json.dumps(DO_META))
-
-
-class TestDataSourceDigitalOcean(HttprettyTestCase):
+class TestDataSourceDigitalOcean(TestCase):
"""
Test reading the meta-data
"""
- def setUp(self):
- self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
- settings.CFG_BUILTIN, None,
- helpers.Paths({}))
- self.ds._get_sysinfo = _mock_dmi
- super(TestDataSourceDigitalOcean, self).setUp()
-
- @httpretty.activate
- def test_connection(self):
- httpretty.register_uri(
- httpretty.GET, MD_URL,
- body=json.dumps(DO_META))
-
- success = self.ds.get_data()
- self.assertTrue(success)
-
- @httpretty.activate
- def test_metadata(self):
- httpretty.register_uri(
- httpretty.GET, MD_URL,
- body=_request_callback)
- self.ds.get_data()
+ def get_ds(self, get_sysinfo=_mock_dmi):
+ ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
+ settings.CFG_BUILTIN, None, helpers.Paths({}))
+ ds.use_ip4LL = False
+ if get_sysinfo is not None:
+ ds._get_sysinfo = get_sysinfo
+ return ds
- self.assertEqual(DO_META.get('user_data'),
- self.ds.get_userdata_raw())
+ @mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo')
+ def test_returns_false_not_on_docean(self, m_read_sysinfo):
+ m_read_sysinfo.return_value = (False, None)
+ ds = self.get_ds(get_sysinfo=None)
+ self.assertEqual(False, ds.get_data())
+ self.assertTrue(m_read_sysinfo.called)
- self.assertEqual(DO_META.get('vendor_data'),
- self.ds.get_vendordata_raw())
+ @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
+ def test_metadata(self, mock_readmd):
+ mock_readmd.return_value = DO_META.copy()
- self.assertEqual(DO_META.get('region'),
- self.ds.availability_zone)
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
- self.assertEqual(DO_META.get('id'),
- self.ds.get_instance_id())
+ self.assertTrue(mock_readmd.called)
- self.assertEqual(DO_META.get('hostname'),
- self.ds.get_hostname())
+ self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw())
+ self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw())
+ self.assertEqual(DO_META.get('region'), ds.availability_zone)
+ self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id())
+ self.assertEqual(DO_META.get('hostname'), ds.get_hostname())
# Single key
self.assertEqual([DO_META.get('public_keys')],
- self.ds.get_public_ssh_keys())
+ ds.get_public_ssh_keys())
- self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
- @httpretty.activate
- def test_multiple_ssh_keys(self):
- DO_META['public_keys'] = DO_MULTIPLE_KEYS
- httpretty.register_uri(
- httpretty.GET, MD_URL,
- body=_request_callback)
- self.ds.get_data()
+ @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
+ def test_multiple_ssh_keys(self, mock_readmd):
+ metadata = DO_META.copy()
+ metadata['public_keys'] = DO_MULTIPLE_KEYS
+ mock_readmd.return_value = metadata.copy()
+
+ ds = self.get_ds()
+ ret = ds.get_data()
+ self.assertTrue(ret)
+
+ self.assertTrue(mock_readmd.called)
# Multiple keys
- self.assertEqual(DO_META.get('public_keys'),
- self.ds.get_public_ssh_keys())
+ self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys())
+ self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestNetworkConvert(TestCase):
+
+ def _get_networking(self):
+ netcfg = digitalocean.convert_network_configuration(
+ DO_META['interfaces'], DO_META['dns']['nameservers'])
+ self.assertIn('config', netcfg)
+ return netcfg
+
+ def test_networking_defined(self):
+ netcfg = self._get_networking()
+ self.assertIsNotNone(netcfg)
+
+ for nic_def in netcfg.get('config'):
+ print(json.dumps(nic_def, indent=3))
+ n_type = nic_def.get('type')
+ n_subnets = nic_def.get('type')
+ n_name = nic_def.get('name')
+ n_mac = nic_def.get('mac_address')
+
+ self.assertIsNotNone(n_type)
+ self.assertIsNotNone(n_subnets)
+ self.assertIsNotNone(n_name)
+ self.assertIsNotNone(n_mac)
+
+ def _get_nic_definition(self, int_type, expected_name):
+ """helper function to return if_type (i.e. public) and the expected
+        name used by cloud-init (i.e. eth0)"""
+ netcfg = self._get_networking()
+ meta_def = (DO_META.get('interfaces')).get(int_type)[0]
+
+ self.assertEqual(int_type, meta_def.get('type'))
+
+ for nic_def in netcfg.get('config'):
+ print(nic_def)
+ if nic_def.get('name') == expected_name:
+ return nic_def, meta_def
+
+ def _get_match_subn(self, subnets, ip_addr):
+ """get the matching subnet definition based on ip address"""
+ for subn in subnets:
+ address = subn.get('address')
+ self.assertIsNotNone(address)
+
+ # equals won't work because of ipv6 addressing being in
+        # cidr notation, i.e. fe00::1/64
+ if ip_addr in address:
+ print(json.dumps(subn, indent=3))
+ return subn
+
+ def test_public_interface_defined(self):
+ """test that the public interface is defined as eth0"""
+ (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+ self.assertEqual('eth0', nic_def.get('name'))
+ self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
+ self.assertEqual('physical', nic_def.get('type'))
+
+ def test_private_interface_defined(self):
+ """test that the private interface is defined as eth1"""
+ (nic_def, meta_def) = self._get_nic_definition('private', 'eth1')
+ self.assertEqual('eth1', nic_def.get('name'))
+ self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
+ self.assertEqual('physical', nic_def.get('type'))
+
+ def _check_dns_nameservers(self, subn_def):
+ self.assertIn('dns_nameservers', subn_def)
+ expected_nameservers = DO_META['dns']['nameservers']
+ nic_nameservers = subn_def.get('dns_nameservers')
+ self.assertEqual(expected_nameservers, nic_nameservers)
+
+ def test_public_interface_ipv6(self):
+ """test public ipv6 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+ ipv6_def = meta_def.get('ipv6')
+ self.assertIsNotNone(ipv6_def)
+
+ subn_def = self._get_match_subn(nic_def.get('subnets'),
+ ipv6_def.get('ip_address'))
+
+ cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'),
+ ipv6_def.get('cidr'))
+
+ self.assertEqual(cidr_notated_address, subn_def.get('address'))
+ self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway'))
+ self._check_dns_nameservers(subn_def)
+
+ def test_public_interface_ipv4(self):
+ """test public ipv4 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+ ipv4_def = meta_def.get('ipv4')
+ self.assertIsNotNone(ipv4_def)
+
+ subn_def = self._get_match_subn(nic_def.get('subnets'),
+ ipv4_def.get('ip_address'))
+
+ self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
+ self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway'))
+ self._check_dns_nameservers(subn_def)
+
+ def test_public_interface_anchor_ipv4(self):
+ """test public ipv4 addressing"""
+ (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+ ipv4_def = meta_def.get('anchor_ipv4')
+ self.assertIsNotNone(ipv4_def)
+
+ subn_def = self._get_match_subn(nic_def.get('subnets'),
+ ipv4_def.get('ip_address'))
+
+ self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
+ self.assertNotIn('gateway', subn_def)
+
+ def test_convert_without_private(self):
+ netcfg = digitalocean.convert_network_configuration(
+ DO_META_2['interfaces'], DO_META_2['dns']['nameservers'])
- self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
+ byname = {}
+ for i in netcfg['config']:
+ if 'name' in i:
+ if i['name'] in byname:
+ raise ValueError("name '%s' in config twice: %s" %
+ (i['name'], netcfg))
+ byname[i['name']] = i
+ self.assertTrue('eth0' in byname)
+ self.assertTrue('subnets' in byname['eth0'])
+ eth0 = byname['eth0']
+ self.assertEqual(
+ sorted(['45.55.249.133', '10.17.0.5']),
+ sorted([i['address'] for i in eth0['subnets']]))
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index d796f030..ce5b5550 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -1,7 +1,7 @@
from cloudinit import helpers
from cloudinit.sources import DataSourceOpenNebula as ds
from cloudinit import util
-from ..helpers import TestCase, populate_dir
+from ..helpers import mock, populate_dir, TestCase
import os
import pwd
@@ -31,12 +31,7 @@ SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
HOSTNAME = 'foo.example.com'
PUBLIC_IP = '10.0.0.3'
-CMD_IP_OUT = '''\
-1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
- link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
-2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
- link/ether 02:00:0a:12:01:01 brd ff:ff:ff:ff:ff:ff
-'''
+DS_PATH = "cloudinit.sources.DataSourceOpenNebula"
class TestOpenNebulaDataSource(TestCase):
@@ -233,18 +228,19 @@ class TestOpenNebulaDataSource(TestCase):
class TestOpenNebulaNetwork(unittest.TestCase):
- def setUp(self):
- super(TestOpenNebulaNetwork, self).setUp()
+ system_nics = {'02:00:0a:12:01:01': 'eth0'}
def test_lo(self):
- net = ds.OpenNebulaNetwork('', {})
+ net = ds.OpenNebulaNetwork(context={}, system_nics_by_mac={})
self.assertEqual(net.gen_conf(), u'''\
auto lo
iface lo inet loopback
''')
- def test_eth0(self):
- net = ds.OpenNebulaNetwork(CMD_IP_OUT, {})
+ @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+ def test_eth0(self, m_get_phys_by_mac):
+ m_get_phys_by_mac.return_value = self.system_nics
+ net = ds.OpenNebulaNetwork({})
self.assertEqual(net.gen_conf(), u'''\
auto lo
iface lo inet loopback
@@ -267,7 +263,8 @@ iface eth0 inet static
'ETH0_DNS': '1.2.3.6 1.2.3.7'
}
- net = ds.OpenNebulaNetwork(CMD_IP_OUT, context)
+ net = ds.OpenNebulaNetwork(context,
+ system_nics_by_mac=self.system_nics)
self.assertEqual(net.gen_conf(), u'''\
auto lo
iface lo inet loopback
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
index a887a930..33bf922d 100644..100755
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
@@ -1,8 +1,10 @@
from cloudinit import distros
+from cloudinit.distros import ug_util
from cloudinit import helpers
from cloudinit import settings
from ..helpers import TestCase
+import mock
bcfg = {
@@ -29,7 +31,7 @@ class TestUGNormalize(TestCase):
return distro
def _norm(self, cfg, distro):
- return distros.normalize_users_groups(cfg, distro)
+ return ug_util.normalize_users_groups(cfg, distro)
def test_group_dict(self):
distro = self._make_distro('ubuntu')
@@ -236,7 +238,7 @@ class TestUGNormalize(TestCase):
}
(users, _groups) = self._norm(ug_cfg, distro)
self.assertIn('bob', users)
- (name, config) = distros.extract_default(users)
+ (name, config) = ug_util.extract_default(users)
self.assertEqual(name, 'bob')
expected_config = {}
def_config = None
@@ -295,3 +297,67 @@ class TestUGNormalize(TestCase):
self.assertIn('bob', users)
self.assertEqual({'default': False}, users['joe'])
self.assertEqual({'default': False}, users['bob'])
+
+ @mock.patch('cloudinit.util.subp')
+ def test_create_snap_user(self, mock_subp):
+ mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
+ '')]
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'users': [
+ {'name': 'joe', 'snapuser': 'joe@joe.com'},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ for (user, config) in users.items():
+ print('user=%s config=%s' % (user, config))
+ username = distro.create_user(user, **config)
+
+ snapcmd = ['snap', 'create-user', '--sudoer', '--json', 'joe@joe.com']
+ mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
+ self.assertEqual(username, 'joe')
+
+ @mock.patch('cloudinit.util.subp')
+ def test_create_snap_user_known(self, mock_subp):
+ mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n',
+ '')]
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'users': [
+ {'name': 'joe', 'snapuser': 'joe@joe.com', 'known': True},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ for (user, config) in users.items():
+ print('user=%s config=%s' % (user, config))
+ username = distro.create_user(user, **config)
+
+ snapcmd = ['snap', 'create-user', '--sudoer', '--json', '--known',
+ 'joe@joe.com']
+ mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd)
+ self.assertEqual(username, 'joe')
+
+ @mock.patch('cloudinit.util.system_is_snappy')
+ @mock.patch('cloudinit.util.is_group')
+ @mock.patch('cloudinit.util.subp')
+ def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp,
+ mock_snappy):
+ mock_isgrp.return_value = False
+ mock_subp.return_value = True
+ mock_snappy.return_value = True
+ distro = self._make_distro('ubuntu')
+ ug_cfg = {
+ 'users': [
+ {'name': 'joe', 'groups': 'users', 'create_groups': True},
+ ],
+ }
+ (users, _groups) = self._norm(ug_cfg, distro)
+ for (user, config) in users.items():
+ print('user=%s config=%s' % (user, config))
+ distro.add_user(user, **config)
+
+ groupcmd = ['groupadd', 'users', '--extrausers']
+ addcmd = ['useradd', 'joe', '--extrausers', '--groups', 'users', '-m']
+
+ mock_subp.assert_any_call(groupcmd)
+ mock_subp.assert_any_call(addcmd, logstring=addcmd)
diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/test_handler/test_handler_apt_conf_v1.py
index 45714efd..64acc3e0 100644
--- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_conf_v1.py
@@ -118,7 +118,7 @@ class TestConversion(TestCase):
def test_convert_with_apt_mirror(self):
mirror = 'http://my.mirror/ubuntu'
f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror})
- self.assertIn(mirror, {m['uri'] for m in f['apt']['primary']})
+ self.assertIn(mirror, set(m['uri'] for m in f['apt']['primary']))
def test_no_old_content(self):
mirror = 'http://my.mirror/ubuntu'
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index e653488a..e28067de 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -81,11 +81,11 @@ class TestConfig(TestCase):
self.cloud = cloud.Cloud(None, self.paths, None, None, None)
self.log = logging.getLogger("TestConfig")
self.args = []
- os.environ = {}
self.cloud_init = None
self.handle = cc_growpart.handle
+ @mock.patch.dict("os.environ", clear=True)
def test_no_resizers_auto_is_fine(self):
with mock.patch.object(
util, 'subp',
@@ -98,6 +98,7 @@ class TestConfig(TestCase):
mockobj.assert_called_once_with(
['growpart', '--help'], env={'LANG': 'C'})
+ @mock.patch.dict("os.environ", clear=True)
def test_no_resizers_mode_growpart_is_exception(self):
with mock.patch.object(
util, 'subp',
@@ -110,6 +111,7 @@ class TestConfig(TestCase):
mockobj.assert_called_once_with(
['growpart', '--help'], env={'LANG': 'C'})
+ @mock.patch.dict("os.environ", clear=True)
def test_mode_auto_prefers_growpart(self):
with mock.patch.object(
util, 'subp',
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index 6f90defb..14366a10 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -132,3 +132,54 @@ class TestLxd(t_help.TestCase):
cc_lxd.bridge_to_debconf(data),
{"lxd/setup-bridge": "false",
"lxd/bridge-name": ""})
+
+ def test_lxd_cmd_new_full(self):
+ data = {"mode": "new",
+ "name": "testbr0",
+ "ipv4_address": "10.0.8.1",
+ "ipv4_netmask": "24",
+ "ipv4_dhcp_first": "10.0.8.2",
+ "ipv4_dhcp_last": "10.0.8.254",
+ "ipv4_dhcp_leases": "250",
+ "ipv4_nat": "true",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true",
+ "domain": "lxd"}
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (["lxc", "network", "create", "testbr0",
+ "ipv4.address=10.0.8.1/24", "ipv4.nat=true",
+ "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254",
+ "ipv6.address=fd98:9e0:3744::1/64",
+ "ipv6.nat=true", "dns.domain=lxd",
+ "--force-local"],
+ ["lxc", "network", "attach-profile",
+ "testbr0", "default", "eth0", "--force-local"]))
+
+ def test_lxd_cmd_new_partial(self):
+ data = {"mode": "new",
+ "ipv6_address": "fd98:9e0:3744::1",
+ "ipv6_netmask": "64",
+ "ipv6_nat": "true"}
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (["lxc", "network", "create", "lxdbr0", "ipv4.address=none",
+ "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true",
+ "--force-local"],
+ ["lxc", "network", "attach-profile",
+ "lxdbr0", "default", "eth0", "--force-local"]))
+
+ def test_lxd_cmd_existing(self):
+ data = {"mode": "existing",
+ "name": "testbr0"}
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (None, ["lxc", "network", "attach-profile",
+ "testbr0", "default", "eth0", "--force-local"]))
+
+ def test_lxd_cmd_none(self):
+ data = {"mode": "none"}
+ self.assertEqual(
+ cc_lxd.bridge_to_cmd(data),
+ (None, None))
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index 57dce1bc..e320dd82 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -1,14 +1,22 @@
from cloudinit.config.cc_snappy import (
makeop, get_package_ops, render_snap_op)
-from cloudinit import util
+from cloudinit.config.cc_snap_config import (
+ add_assertions, add_snap_user, ASSERTIONS_FILE)
+from cloudinit import (distros, helpers, cloud, util)
+from cloudinit.config.cc_snap_config import handle as snap_handle
+from cloudinit.sources import DataSourceNone
+from ..helpers import FilesystemMockingTestCase, mock
from .. import helpers as t_help
+import logging
import os
import shutil
import tempfile
+import textwrap
import yaml
+LOG = logging.getLogger(__name__)
ALLOWED = (dict, list, int, str)
@@ -287,6 +295,289 @@ class TestInstallPackages(t_help.TestCase):
self.assertEqual(yaml.safe_load(mydata), data_found)
+class TestSnapConfig(FilesystemMockingTestCase):
+
+ SYSTEM_USER_ASSERTION = textwrap.dedent("""
+ type: system-user
+ authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
+ brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp
+ email: foo@bar.com
+ password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt
+ series:
+ - 16
+ since: 2016-09-10T16:34:00+03:00
+ until: 2017-11-10T16:34:00+03:00
+ username: baz
+ sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj
+
+ AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP
+ Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI
+ zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF
+ s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj
+ +to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP
+ Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS
+ d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q
+ BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H
+ f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V
+ v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q==""")
+
+ ACCOUNT_ASSERTION = textwrap.dedent("""
+ type: account-key
+ authority-id: canonical
+ revision: 2
+ public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0
+ account-id: canonical
+ name: store
+ since: 2016-04-01T00:00:00.0Z
+ body-length: 717
+ sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH
+
+ AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j
+ qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482
+ vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ
+ UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK
+ Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG
+ o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl
+ VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9
+ 2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an
+ Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc
+ vUvV7RjVzv17ut0AEQEAAQ==
+
+ AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM
+ WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b
+ nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL
+ 3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL
+ eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY
+ inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1
+ rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+
+ rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE
+ aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ
+ 6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO
+ haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF
+ yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9
+ HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi
+ skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK
+ CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde
+ ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF
+ qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR
+ IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t
+ oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k""")
+
+ test_assertions = [ACCOUNT_ASSERTION, SYSTEM_USER_ASSERTION]
+
+ def setUp(self):
+ super(TestSnapConfig, self).setUp()
+ self.subp = util.subp
+ self.new_root = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.new_root)
+
+ def _get_cloud(self, distro, metadata=None):
+ self.patchUtils(self.new_root)
+ paths = helpers.Paths({})
+ cls = distros.fetch(distro)
+ mydist = cls(distro, {}, paths)
+ myds = DataSourceNone.DataSourceNone({}, mydist, paths)
+ if metadata:
+ myds.metadata.update(metadata)
+ return cloud.Cloud(myds, paths, {}, mydist, None)
+
+ @mock.patch('cloudinit.util.write_file')
+ @mock.patch('cloudinit.util.subp')
+ def test_snap_config_add_assertions(self, msubp, mwrite):
+ add_assertions(self.test_assertions)
+
+ combined = "\n".join(self.test_assertions)
+ mwrite.assert_any_call(ASSERTIONS_FILE, combined.encode('utf-8'))
+ msubp.assert_called_with(['snap', 'ack', ASSERTIONS_FILE],
+ capture=True)
+
+ def test_snap_config_add_assertions_empty(self):
+ self.assertRaises(ValueError, add_assertions, [])
+
+ def test_add_assertions_nonlist(self):
+ self.assertRaises(ValueError, add_assertions, {})
+
+ @mock.patch('cloudinit.util.write_file')
+ @mock.patch('cloudinit.util.subp')
+ def test_snap_config_add_assertions_ack_fails(self, msubp, mwrite):
+ msubp.side_effect = [util.ProcessExecutionError("Invalid assertion")]
+ self.assertRaises(util.ProcessExecutionError, add_assertions,
+ self.test_assertions)
+
+ @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
+ @mock.patch('cloudinit.config.cc_snap_config.util')
+ def test_snap_config_handle_no_config(self, mock_util, mock_add):
+ cfg = {}
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc.distro.name = 'ubuntu'
+ mock_util.which.return_value = None
+ snap_handle('snap_config', cfg, cc, LOG, None)
+ mock_add.assert_not_called()
+
+ def test_snap_config_add_snap_user_no_config(self):
+ usercfg = add_snap_user(cfg=None)
+ self.assertEqual(usercfg, None)
+
+ def test_snap_config_add_snap_user_not_dict(self):
+ cfg = ['foobar']
+ self.assertRaises(ValueError, add_snap_user, cfg)
+
+ def test_snap_config_add_snap_user_no_email(self):
+ cfg = {'assertions': [], 'known': True}
+ usercfg = add_snap_user(cfg=cfg)
+ self.assertEqual(usercfg, None)
+
+ @mock.patch('cloudinit.config.cc_snap_config.util')
+ def test_snap_config_add_snap_user_email_only(self, mock_util):
+ email = 'janet@planetjanet.org'
+ cfg = {'email': email}
+ mock_util.which.return_value = None
+ mock_util.system_is_snappy.return_value = True
+ mock_util.subp.side_effect = [
+ ("false\n", ""), # snap managed
+ ]
+
+ usercfg = add_snap_user(cfg=cfg)
+
+ self.assertEqual(usercfg, {'snapuser': email, 'known': False})
+
+ @mock.patch('cloudinit.config.cc_snap_config.util')
+ def test_snap_config_add_snap_user_email_known(self, mock_util):
+ email = 'janet@planetjanet.org'
+ known = True
+ cfg = {'email': email, 'known': known}
+ mock_util.which.return_value = None
+ mock_util.system_is_snappy.return_value = True
+ mock_util.subp.side_effect = [
+ ("false\n", ""), # snap managed
+ (self.SYSTEM_USER_ASSERTION, ""), # snap known system-user
+ ]
+
+ usercfg = add_snap_user(cfg=cfg)
+
+ self.assertEqual(usercfg, {'snapuser': email, 'known': known})
+
+ @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
+ @mock.patch('cloudinit.config.cc_snap_config.util')
+ def test_snap_config_handle_system_not_snappy(self, mock_util, mock_add):
+ cfg = {'snappy': {'assertions': self.test_assertions}}
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc.distro.name = 'ubuntu'
+ mock_util.which.return_value = None
+ mock_util.system_is_snappy.return_value = False
+
+ snap_handle('snap_config', cfg, cc, LOG, None)
+
+ mock_add.assert_not_called()
+
+ @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
+ @mock.patch('cloudinit.config.cc_snap_config.util')
+ def test_snap_config_handle_snapuser(self, mock_util, mock_add):
+ email = 'janet@planetjanet.org'
+ cfg = {
+ 'snappy': {
+ 'assertions': self.test_assertions,
+ 'email': email,
+ }
+ }
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc.distro.name = 'ubuntu'
+ mock_util.which.return_value = None
+ mock_util.system_is_snappy.return_value = True
+ mock_util.subp.side_effect = [
+ ("false\n", ""), # snap managed
+ ]
+
+ snap_handle('snap_config', cfg, cc, LOG, None)
+
+ mock_add.assert_called_with(self.test_assertions)
+ usercfg = {'snapuser': email, 'known': False}
+ cc.distro.create_user.assert_called_with(email, **usercfg)
+
+ @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
+ @mock.patch('cloudinit.config.cc_snap_config.util')
+ def test_snap_config_handle_snapuser_known(self, mock_util, mock_add):
+ email = 'janet@planetjanet.org'
+ cfg = {
+ 'snappy': {
+ 'assertions': self.test_assertions,
+ 'email': email,
+ 'known': True,
+ }
+ }
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc.distro.name = 'ubuntu'
+ mock_util.which.return_value = None
+ mock_util.system_is_snappy.return_value = True
+ mock_util.subp.side_effect = [
+ ("false\n", ""), # snap managed
+ (self.SYSTEM_USER_ASSERTION, ""), # snap known system-user
+ ]
+
+ snap_handle('snap_config', cfg, cc, LOG, None)
+
+ mock_add.assert_called_with(self.test_assertions)
+ usercfg = {'snapuser': email, 'known': True}
+ cc.distro.create_user.assert_called_with(email, **usercfg)
+
+ @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
+ @mock.patch('cloudinit.config.cc_snap_config.util')
+ def test_snap_config_handle_snapuser_known_managed(self, mock_util,
+ mock_add):
+ email = 'janet@planetjanet.org'
+ cfg = {
+ 'snappy': {
+ 'assertions': self.test_assertions,
+ 'email': email,
+ 'known': True,
+ }
+ }
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc.distro.name = 'ubuntu'
+ mock_util.which.return_value = None
+ mock_util.system_is_snappy.return_value = True
+ mock_util.subp.side_effect = [
+ ("true\n", ""), # snap managed
+ ]
+
+ snap_handle('snap_config', cfg, cc, LOG, None)
+
+ mock_add.assert_called_with(self.test_assertions)
+ cc.distro.create_user.assert_not_called()
+
+ @mock.patch('cloudinit.config.cc_snap_config.add_assertions')
+ @mock.patch('cloudinit.config.cc_snap_config.util')
+ def test_snap_config_handle_snapuser_known_no_assertion(self, mock_util,
+ mock_add):
+ email = 'janet@planetjanet.org'
+ cfg = {
+ 'snappy': {
+ 'assertions': [self.ACCOUNT_ASSERTION],
+ 'email': email,
+ 'known': True,
+ }
+ }
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc.distro.name = 'ubuntu'
+ mock_util.which.return_value = None
+ mock_util.system_is_snappy.return_value = True
+ mock_util.subp.side_effect = [
+ ("true\n", ""), # snap managed
+ ("", ""), # snap known system-user
+ ]
+
+ snap_handle('snap_config', cfg, cc, LOG, None)
+
+ mock_add.assert_called_with([self.ACCOUNT_ASSERTION])
+ cc.distro.create_user.assert_not_called()
+
+
def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
if cfgfile:
cfgfile = os.path.sep.join([tmpd, cfgfile])
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 41b9a6d0..78c080ca 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -53,6 +53,45 @@ DHCP_EXPECTED_1 = {
'dns_nameservers': ['192.168.122.1']}],
}
+DHCP6_CONTENT_1 = """
+DEVICE=eno1
+HOSTNAME=
+DNSDOMAIN=
+reason='PREINIT'
+interface='eno1'
+DEVICE=eno1
+HOSTNAME=
+DNSDOMAIN=
+reason='FAIL'
+interface='eno1'
+DEVICE=eno1
+HOSTNAME=
+DNSDOMAIN=
+reason='PREINIT6'
+interface='eno1'
+DEVICE=eno1
+IPV6PROTO=dhcp6
+IPV6ADDR=2001:67c:1562:8010:0:1::
+IPV6NETMASK=64
+IPV6DNS0=2001:67c:1562:8010::2:1
+IPV6DOMAINSEARCH=
+HOSTNAME=
+DNSDOMAIN=
+reason='BOUND6'
+interface='eno1'
+new_ip6_address='2001:67c:1562:8010:0:1::'
+new_ip6_prefixlen='64'
+new_dhcp6_name_servers='2001:67c:1562:8010::2:1'
+"""
+
+DHCP6_EXPECTED_1 = {
+ 'name': 'eno1',
+ 'type': 'physical',
+ 'subnets': [{'control': 'manual',
+ 'dns_nameservers': ['2001:67c:1562:8010::2:1'],
+ 'netmask': '64',
+ 'type': 'dhcp6'}]}
+
STATIC_CONTENT_1 = """
DEVICE='eth1'
@@ -590,6 +629,10 @@ class TestCmdlineConfigParsing(TestCase):
found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1)
self.assertEqual(found, ('eth0', DHCP_EXPECTED_1))
+ def test_cmdline_convert_dhcp6(self):
+ found = cmdline._klibc_to_config_entry(DHCP6_CONTENT_1)
+ self.assertEqual(found, ('eno1', DHCP6_EXPECTED_1))
+
def test_cmdline_convert_static(self):
found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1)
self.assertEqual(found, ('eth1', STATIC_EXPECTED_1))
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index d2031f59..f6a8ab75 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -223,8 +223,10 @@ class TestKeyValStrings(helpers.TestCase):
class TestGetCmdline(helpers.TestCase):
def test_cmdline_reads_debug_env(self):
- os.environ['DEBUG_PROC_CMDLINE'] = 'abcd 123'
- self.assertEqual(os.environ['DEBUG_PROC_CMDLINE'], util.get_cmdline())
+ with mock.patch.dict("os.environ",
+ values={'DEBUG_PROC_CMDLINE': 'abcd 123'}):
+ ret = util.get_cmdline()
+ self.assertEqual("abcd 123", ret)
class TestLoadYaml(helpers.TestCase):
@@ -384,7 +386,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
dmi_name = 'use-dmidecode'
self._configure_dmidecode_return(dmi_name, dmi_val)
- expected = {'armel': None, 'aarch64': None, 'x86_64': dmi_val}
+ expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val}
found = {}
# we do not run the 'dmi-decode' binary on some arches
# verify that anything requested that is not in the sysfs dir
@@ -516,6 +518,7 @@ class TestSubp(helpers.TestCase):
utf8_invalid = b'ab\xaadef'
utf8_valid = b'start \xc3\xa9 end'
utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7'
+ printenv = ['bash', '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--']
def printf_cmd(self, *args):
# bash's printf supports \xaa. So does /usr/bin/printf
@@ -550,7 +553,7 @@ class TestSubp(helpers.TestCase):
def test_subp_decode_invalid_utf8_replaces(self):
(out, _err) = util.subp(self.stdin2out, capture=True,
data=self.utf8_invalid)
- expected = self.utf8_invalid.decode('utf-8', errors='replace')
+ expected = self.utf8_invalid.decode('utf-8', 'replace')
self.assertEqual(out, expected)
def test_subp_decode_strict_raises(self):
@@ -566,6 +569,29 @@ class TestSubp(helpers.TestCase):
self.assertEqual(err, data)
self.assertEqual(out, b'')
+ def test_subp_reads_env(self):
+ with mock.patch.dict("os.environ", values={'FOO': 'BAR'}):
+ out, err = util.subp(self.printenv + ['FOO'], capture=True)
+ self.assertEqual('FOO=BAR', out.splitlines()[0])
+
+ def test_subp_env_and_update_env(self):
+ out, err = util.subp(
+ self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
+ env={'FOO': 'BAR'},
+ update_env={'HOME': '/myhome', 'K2': 'V2'})
+ self.assertEqual(
+ ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines())
+
+ def test_subp_update_env(self):
+ extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'}
+ with mock.patch.dict("os.environ", values=extra):
+ out, err = util.subp(
+ self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True,
+ update_env={'HOME': '/myhome', 'K2': 'V2'})
+
+ self.assertEqual(
+ ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines())
+
def test_returns_none_if_no_capture(self):
(out, err) = util.subp(self.stdin2out, data=b'', capture=False)
self.assertEqual(err, None)
@@ -577,4 +603,12 @@ class TestSubp(helpers.TestCase):
self.assertEqual("/target/my/path/",
util.target_path("/target/", "///my/path/"))
+
+class TestEncode(helpers.TestCase):
+ """Test the encoding functions"""
+ def test_decode_binary_plain_text_with_hex(self):
+ blob = 'BOOTABLE_FLAG=\x80init=/bin/systemd'
+ text = util.decode_binary(blob)
+ self.assertEqual(text, blob)
+
# vi: ts=4 expandtab
diff --git a/tox.ini b/tox.ini
index e7a6f22c..08318a9f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py27,py3,flake8
+envlist = py27, py3, flake8, xenial
recreate = True
[testenv]
@@ -19,6 +19,9 @@ setenv =
[testenv:py3]
basepython = python3
+commands = {envpython} -m nose \
+ {posargs:--with-coverage --cover-erase \
+ --cover-branches --cover-package=cloudinit --cover-inclusive}
[testenv:py26]
commands = nosetests {posargs:tests}
@@ -28,3 +31,50 @@ setenv =
[flake8]
ignore=H404,H405,H105,H301,H104,H403,H101
exclude = .venv,.tox,dist,doc,*egg,.git,build,tools
+
+[testenv:doc]
+basepython = python3
+deps = {[testenv]deps}
+ sphinx
+commands = {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html}
+
+[testenv:xenial]
+basepython = python3
+deps =
+ # requirements
+ jinja2==2.8
+ pyyaml==3.11
+ PrettyTable==0.7.2
+ oauthlib==1.0.3
+ pyserial==3.0.1
+ configobj==5.0.6
+ requests==2.9.1
+ # jsonpatch ubuntu is 1.10, not 1.19 (#839779)
+ jsonpatch==1.10
+ six==1.10.0
+ # test-requirements
+ httpretty==0.8.6
+ mock==1.3.0
+ nose==1.3.7
+ unittest2==1.1.0
+ contextlib2==0.5.1
+ pep8==1.7.0
+ pyflakes==1.1.0
+ flake8==2.5.4
+ hacking==0.10.2
+
+[testenv:centos6]
+basepython = python2.6
+commands = nosetests {posargs:tests}
+deps =
+ # requirements
+ argparse==1.2.1
+ jinja2==2.2.1
+ pyyaml==3.10
+ PrettyTable==0.7.2
+ oauthlib==0.6.0
+ configobj==4.6.0
+ requests==2.6.0
+ jsonpatch==1.2
+ six==1.9.0
+ -r{toxinidir}/test-requirements.txt