Diffstat (limited to 'doc/rtd')
-rw-r--r--  doc/rtd/conf.py | 45
-rw-r--r--  doc/rtd/index.rst | 7
-rw-r--r--  doc/rtd/topics/availability.rst | 14
-rw-r--r--  doc/rtd/topics/boot.rst | 16
-rw-r--r--  doc/rtd/topics/bugs.rst | 4
-rw-r--r--  doc/rtd/topics/cli.rst | 10
-rw-r--r--  doc/rtd/topics/cloud_tests.rst | 764
-rw-r--r--  doc/rtd/topics/code_review.rst | 8
-rw-r--r--  doc/rtd/topics/contributing.rst | 2
-rw-r--r--  doc/rtd/topics/datasources.rst | 15
-rw-r--r--  doc/rtd/topics/datasources/aliyun.rst | 17
-rw-r--r--  doc/rtd/topics/datasources/altcloud.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/azure.rst | 84
-rw-r--r--  doc/rtd/topics/datasources/cloudsigma.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/cloudstack.rst | 4
-rw-r--r--  doc/rtd/topics/datasources/configdrive.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/digitalocean.rst | 6
-rw-r--r--  doc/rtd/topics/datasources/e24cloud.rst | 4
-rw-r--r--  doc/rtd/topics/datasources/ec2.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/fallback.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/gce.rst | 24
-rw-r--r--  doc/rtd/topics/datasources/lxd.rst | 65
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst | 10
-rw-r--r--  doc/rtd/topics/datasources/opennebula.rst | 10
-rw-r--r--  doc/rtd/topics/datasources/openstack.rst | 16
-rw-r--r--  doc/rtd/topics/datasources/oracle.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/ovf.rst | 19
-rw-r--r--  doc/rtd/topics/datasources/rbxcloud.rst | 2
-rw-r--r--  doc/rtd/topics/datasources/smartos.rst | 4
-rw-r--r--  doc/rtd/topics/datasources/upcloud.rst | 24
-rw-r--r--  doc/rtd/topics/datasources/vmware.rst | 358
-rw-r--r--  doc/rtd/topics/datasources/vultr.rst | 35
-rw-r--r--  doc/rtd/topics/datasources/zstack.rst | 2
-rw-r--r--  doc/rtd/topics/debugging.rst | 16
-rw-r--r--  doc/rtd/topics/dir_layout.rst | 2
-rw-r--r--  doc/rtd/topics/events.rst | 95
-rw-r--r--  doc/rtd/topics/examples.rst | 6
-rw-r--r--  doc/rtd/topics/faq.rst | 10
-rw-r--r--  doc/rtd/topics/format.rst | 42
-rw-r--r--  doc/rtd/topics/hacking.rst | 2
-rw-r--r--  doc/rtd/topics/instancedata.rst | 61
-rw-r--r--  doc/rtd/topics/integration_tests.rst | 199
-rw-r--r--  doc/rtd/topics/logging.rst | 6
-rw-r--r--  doc/rtd/topics/merging.rst | 8
-rw-r--r--  doc/rtd/topics/modules.rst | 4
-rw-r--r--  doc/rtd/topics/network-config-format-eni.rst | 2
-rw-r--r--  doc/rtd/topics/network-config-format-v1.rst | 30
-rw-r--r--  doc/rtd/topics/network-config-format-v2.rst | 42
-rw-r--r--  doc/rtd/topics/network-config.rst | 30
-rw-r--r--  doc/rtd/topics/security.rst | 2
-rw-r--r--  doc/rtd/topics/testing.rst | 160
-rw-r--r--  doc/rtd/topics/vendordata.rst | 6
52 files changed, 1268 insertions, 1036 deletions
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 684822c2..9976afa4 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -1,24 +1,24 @@
import os
import sys
+from cloudinit import version
+
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('../../'))
-sys.path.insert(0, os.path.abspath('../'))
-sys.path.insert(0, os.path.abspath('./'))
-sys.path.insert(0, os.path.abspath('.'))
+sys.path.insert(0, os.path.abspath("../../"))
+sys.path.insert(0, os.path.abspath("../"))
+sys.path.insert(0, os.path.abspath("./"))
+sys.path.insert(0, os.path.abspath("."))
-from cloudinit import version
-from cloudinit.config.schema import get_schema_doc
-# Supress warnings for docs that aren't used yet
+# Suppress warnings for docs that aren't used yet
# unused_docs = [
# ]
# General information about the project.
-project = 'cloud-init'
-copyright = '2020, Canonical Ltd.'
+project = "cloud-init"
+copyright = "2020, Canonical Ltd."
# -- General configuration ----------------------------------------------------
@@ -28,17 +28,17 @@ copyright = '2020, Canonical Ltd.'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
- 'm2r',
- 'sphinx.ext.autodoc',
- 'sphinx.ext.autosectionlabel',
- 'sphinx.ext.viewcode',
+ "m2r",
+ "sphinx.ext.autodoc",
+ "sphinx.ext.autosectionlabel",
+ "sphinx.ext.viewcode",
]
# The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
# The master toctree document.
-master_doc = 'index'
+master_doc = "index"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -47,7 +47,7 @@ version = version.version_string()
release = version
# Set the default Pygments syntax
-highlight_language = 'yaml'
+highlight_language = "yaml"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
@@ -61,17 +61,8 @@ show_authors = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
-html_theme = 'sphinx_rtd_theme'
+html_theme = "sphinx_rtd_theme"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-html_logo = 'static/logo.png'
-
-def generate_docstring_from_schema(app, what, name, obj, options, lines):
- """Override module docs from schema when present."""
- if what == 'module' and hasattr(obj, "schema"):
- del lines[:]
- lines.extend(get_schema_doc(obj.schema).split('\n'))
-
-def setup(app):
- app.connect('autodoc-process-docstring', generate_docstring_from_schema)
+html_logo = "static/logo.png"
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index ddcb0b31..251a904d 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -27,7 +27,7 @@ Getting help
Having trouble? We would like to help!
- Try the :ref:`FAQ` – it's got answers to some common questions
-- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Ask a question in the ``#cloud-init`` IRC channel on Libera
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? `Report bugs on Launchpad <https://bugs.launchpad.net/cloud-init/+filebug>`_
@@ -49,6 +49,7 @@ Having trouble? We would like to help!
topics/format.rst
topics/examples.rst
+ topics/events.rst
topics/modules.rst
topics/merging.rst
@@ -67,7 +68,7 @@ Having trouble? We would like to help!
:titlesonly:
:caption: Development
- topics/hacking.rst
+ topics/contributing.rst
topics/code_review.rst
topics/security.rst
topics/debugging.rst
@@ -75,7 +76,7 @@ Having trouble? We would like to help!
topics/dir_layout.rst
topics/analyze.rst
topics/docs.rst
+ topics/testing.rst
topics/integration_tests.rst
- topics/cloud_tests.rst
.. vi: textwidth=79
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index 8f56a7d2..d8ca9d16 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -14,18 +14,20 @@ distributions and clouds, both public and private.
Distributions
=============
-Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD
-and OpenBSD:
+Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD,
+OpenBSD and DragonFlyBSD:
- Alpine Linux
- ArchLinux
- Debian
+- DragonFlyBSD
- Fedora
- FreeBSD
- Gentoo Linux
- NetBSD
- OpenBSD
-- RHEL/CentOS
+- Photon OS
+- RHEL/CentOS/AlmaLinux/Rocky Linux/EuroLinux
- SLES/openSUSE
- Ubuntu
@@ -42,7 +44,7 @@ environments in the public cloud:
- Softlayer
- Rackspace Public Cloud
- IBM Cloud
-- Digital Ocean
+- DigitalOcean
- Bigstep
- Hetzner
- Joyent
@@ -55,6 +57,9 @@ environments in the public cloud:
- CloudStack
- AltCloud
- SmartOS
+- UpCloud
+- Vultr
+- Zadara Edge Cloud Platform
Additionally, cloud-init is supported on these private clouds:
@@ -63,5 +68,6 @@ Additionally, cloud-init is supported on these private clouds:
- LXD
- KVM
- Metal-as-a-Service (MAAS)
+- VMware
.. vi: textwidth=79
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index a5282e35..b904eaf4 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -65,10 +65,10 @@ If this is an instance's first boot, then the selected network configuration
is rendered. This includes clearing of all previous (stale) configuration
including persistent device naming with old mac addresses.
-This stage must block network bring-up or any stale configuration might
-already have been applied. That could have negative effects such as DHCP
-hooks or broadcast of an old hostname. It would also put the system in
-an odd state to recover from as it may then have to restart network
+This stage must block network bring-up, or else any stale configuration
+might already have been applied. That could have negative effects such as
+DHCP hooks running or broadcast of an old hostname. It would also put the
+system in an odd state to recover from, as it may then have to restart network
devices.
Cloud-init then exits and expects for the continued boot of the operating
@@ -93,7 +93,7 @@ Network
+---------+--------+----------------------------------------------------------+
This stage requires all configured networking to be online, as it will fully
-process any user-data that is found. Here, processing means:
+process any user-data that is found. Here processing means:
* retrieve any ``#include`` or ``#include-once`` (recursively) including http
* decompress any compressed content
@@ -106,7 +106,7 @@ from sources only available via network. For example, a user may have
provided user-data in a network resource that describes how local mounts
should be done.
-On some clouds such as Azure, this stage will create filesystems to be
+On some clouds, such as Azure, this stage will create filesystems to be
mounted, including ones that have stale (previous instance) references in
``/etc/fstab``. As such, entries in ``/etc/fstab`` other than those necessary
for cloud-init to run should not be added until after this stage.
@@ -146,11 +146,11 @@ Final
This stage runs as late in boot as possible. Any scripts that a user is
accustomed to running after logging into a system should run correctly here.
-Things that run here include
+Things that run here include:
* package installations
* configuration management plugins (puppet, chef, salt-minion)
- * user-scripts (i.e. shell scripts passed as user-data)
+ * user-defined scripts (i.e. shell scripts passed as user-data)
For scripts external to cloud-init looking to wait until cloud-init is
finished, the ``cloud-init status`` subcommand can help block external
diff --git a/doc/rtd/topics/bugs.rst b/doc/rtd/topics/bugs.rst
index 4b60776b..ee3828de 100644
--- a/doc/rtd/topics/bugs.rst
+++ b/doc/rtd/topics/bugs.rst
@@ -17,7 +17,7 @@ To aid in debugging, please collect the necessary logs. To do so, run the
.. code-block:: shell-session
- $ cloud-init collect-logs
+ $ sudo cloud-init collect-logs
Wrote /home/ubuntu/cloud-init.tar.gz
If your version of cloud-init does not have the `collect-logs` subcommand,
@@ -25,7 +25,7 @@ then please manually collect the base log files by doing the following:
.. code-block:: shell-session
- $ dmesg > dmesg.txt
+ $ sudo dmesg > dmesg.txt
$ sudo journalctl -o short-precise > journal.txt
$ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \
/var/log/cloud-init.log /var/log/cloud-init-output.log
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index 0ff230b5..e2f48bf0 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -17,10 +17,10 @@ option. This can be used against cloud-init itself or any of its subcommands.
-h, --help show this help message and exit
--version, -v show program's version number and exit
--file FILES, -f FILES
- additional yaml configuration files to use
+ additional yaml configuration files to use
--debug, -d show additional pre-action logging (default: False)
--force force running even if no datasource is found (use at
- your own risk)
+ your own risk)
Subcommands:
{init,modules,single,query,dhclient-hook,features,analyze,devel,collect-logs,clean,status}
@@ -115,10 +115,14 @@ Current subcommands:
containing the jinja template header ``## template: jinja`` and renders
that content with any instance-data.json variables present.
* ``schema``: a **#cloud-config** format and schema
- validator. It accepts a cloud-config yaml file and annotates potential
+ validator. It accepts a cloud-config YAML file and annotates potential
schema errors locally without the need for deployment. Schema
validation is work in progress and supports a subset of cloud-config
modules.
+ * ``hotplug-hook``: respond to newly added system devices by retrieving
+ updated system metadata and bringing up/down the corresponding device.
+ This command is intended to be called via a systemd service and is
+ not considered user-accessible except for debugging purposes.
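+
+  As a usage sketch of the ``render`` and ``schema`` subcommands described
+  above (the file names are hypothetical and flags may vary by release):
+
+  .. code-block:: shell-session
+
+     $ cloud-init devel render my-user-data.yaml
+     $ cloud-init devel schema --config-file my-cloud-config.yaml --annotate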
.. _cli_features:
diff --git a/doc/rtd/topics/cloud_tests.rst b/doc/rtd/topics/cloud_tests.rst
deleted file mode 100644
index 0fbb1301..00000000
--- a/doc/rtd/topics/cloud_tests.rst
+++ /dev/null
@@ -1,764 +0,0 @@
-************************
-Cloud tests (Deprecated)
-************************
-
-Cloud tests are longer be maintained. For writing integration
-tests, see the :ref:`integration_tests` page.
-
-Overview
-========
-
-This page describes the execution, development, and architecture of the
-cloud-init integration tests:
-
-* Execution explains the options available and running of tests
-* Development shows how to write test cases
-* Architecture explains the internal processes
-
-Execution
-=========
-
-Overview
---------
-
-In order to avoid the need for dependencies and ease the setup and
-configuration users can run the integration tests via tox:
-
-.. code-block:: shell-session
-
- $ git clone https://github.com/canonical/cloud-init
- $ cd cloud-init
- $ tox -e citest -- -h
-
-Everything after the double dash will be passed to the integration tests.
-Executing tests has several options:
-
-* ``run`` an alias to run both ``collect`` and ``verify``. The ``tree_run``
- command does the same thing, except uses a deb built from the current
- working tree.
-
-* ``collect`` deploys on the specified platform and distro, patches with the
- requested deb or rpm, and finally collects output of the arbitrary
- commands. Similarly, ```tree_collect`` will collect output using a deb
- built from the current working tree.
-
-* ``verify`` given a directory of test data, run the Python unit tests on
- it to generate results.
-
-* ``bddeb`` will build a deb of the current working tree.
-
-Run
----
-
-The first example will provide a complete end-to-end run of data
-collection and verification. There are additional examples below
-explaining how to run one or the other independently.
-
-.. code-block:: shell-session
-
- $ git clone https://github.com/canonical/cloud-init
- $ cd cloud-init
- $ tox -e citest -- run --verbose \
- --os-name stretch --os-name xenial \
- --deb cloud-init_0.7.8~my_patch_all.deb \
- --preserve-data --data-dir ~/collection \
- --preserve-instance
-
-The above command will do the following:
-
-* ``run`` both collect output and run tests the output
-
-* ``--verbose`` verbose output
-
-* ``--os-name stretch`` on the Debian Stretch release
-
-* ``--os-name xenial`` on the Ubuntu Xenial release
-
-* ``--deb cloud-init_0.7.8~patch_all.deb`` use this deb as the version of
- cloud-init to run with
-
-* ``--preserve-data`` always preserve collected data, do not remove data
- after successful test run
-
-* ``--preserve-instance`` do not destroy the instance after test to allow
- for debugging the stopped instance during integration test development. By
- default, test instances are destroyed after the test completes.
-
-* ``--data-dir ~/collection`` write collected data into `~/collection`,
- rather than using a temporary directory
-
-For a more detailed explanation of each option see below.
-
-.. note::
- By default, data collected by the run command will be written into a
- temporary directory and deleted after a successful. If you would
- like to preserve this data, please use the option ``--preserve-data``.
-
-Collect
--------
-
-If developing tests it may be necessary to see if cloud-config works as
-expected and the correct files are pulled down. In this case only a
-collect can be ran by running:
-
-.. code-block:: shell-session
-
- $ tox -e citest -- collect -n xenial --data-dir /tmp/collection
-
-The above command will run the collection tests on xenial and place
-all results into `/tmp/collection`.
-
-Verify
-------
-
-When developing tests it is much easier to simply rerun the verify scripts
-without the more lengthy collect process. This can be done by running:
-
-.. code-block:: shell-session
-
- $ tox -e citest -- verify --data-dir /tmp/collection
-
-The above command will run the verify scripts on the data discovered in
-`/tmp/collection`.
-
-TreeRun and TreeCollect
------------------------
-
-If working on a cloud-init feature or resolving a bug, it may be useful to
-run the current copy of cloud-init in the integration testing environment.
-The integration testing suite can automatically build a deb based on the
-current working tree of cloud-init and run the test suite using this deb.
-
-The ``tree_run`` and ``tree_collect`` commands take the same arguments as
-the ``run`` and ``collect`` commands. These commands will build a deb and
-write it into a temporary file, then start the test suite and pass that deb
-in. To build a deb only, and not run the test suite, the ``bddeb`` command
-can be used.
-
-Note that code in the cloud-init working tree that has not been committed
-when the cloud-init deb is built will still be included. To build a
-cloud-init deb from or use the ``tree_run`` command using a copy of
-cloud-init located in a different directory, use the option ``--cloud-init
-/path/to/cloud-init``.
-
-.. code-block:: shell-session
-
- $ tox -e citest -- tree_run --verbose \
- --os-name xenial --os-name stretch \
- --test modules/final_message --test modules/write_files \
- --result /tmp/result.yaml
-
-Bddeb
------
-
-The ``bddeb`` command can be used to generate a deb file. This is used by the
-tree_run and tree_collect commands to build a deb of the current working tree
-using the packaging template contained in the ``packages/debian/`` directory.
-It can also be used to generate a deb for use in other situations and avoid
-needing to have all the build and test dependencies installed locally.
-
-* ``--bddeb-args``: arguments to pass through to bddeb
-* ``--build-os``: distribution to use as build system (default is xenial)
-* ``--build-platform``: platform to use for build system (default is lxd)
-* ``--cloud-init``: path to base of cloud-init tree (default is '.')
-* ``--deb``: path to write output deb to (default is '.')
-* ``--packaging-branch``: import the ``debian/`` packaging directory
- from the specified branch (default: ``ubuntu/devel``) instead of using
- the packaging template.
-
-Setup Image
------------
-
-By default an image that is used will remain unmodified, but certain
-scenarios may require image modification. For example, many images may use
-a much older cloud-init. As a result tests looking at newer functionality
-will fail because a newer version of cloud-init may be required. The
-following options can be used for further customization:
-
-* ``--deb``: install the specified deb into the image
-* ``--rpm``: install the specified rpm into the image
-* ``--repo``: enable a repository and upgrade cloud-init afterwards
-* ``--ppa``: enable a ppa and upgrade cloud-init afterwards
-* ``--upgrade``: upgrade cloud-init from repos
-* ``--upgrade-full``: run a full system upgrade
-* ``--script``: execute a script in the image. This can perform any setup
- required that is not covered by the other options
-
-Test Case Development
-=====================
-
-Overview
---------
-
-As a test writer you need to develop a test configuration and a
-verification file:
-
- * The test configuration specifies a specific cloud-config to be used by
- cloud-init and a list of arbitrary commands to capture the output of
- (e.g my_test.yaml)
-
- * The verification file runs tests on the collected output to determine
- the result of the test (e.g. my_test.py)
-
-The names must match, however the extensions will of course be different,
-yaml vs py.
-
-Configuration
--------------
-
-The test configuration is a YAML file such as *ntp_server.yaml* below:
-
-.. code-block:: yaml
-
- #
- # Empty NTP config to setup using defaults
- #
- # NOTE: this should not require apt feature, use 'which' rather than 'dpkg -l'
- # NOTE: this should not require no_ntpdate feature, use 'which' to check for
- # installation rather than 'dpkg -l', as 'grep ntp' matches 'ntpdate'
- # NOTE: the verifier should check for any ntp server not 'ubuntu.pool.ntp.org'
- cloud_config: |
- #cloud-config
- ntp:
- servers:
- - pool.ntp.org
- required_features:
- - apt
- - no_ntpdate
- - ubuntu_ntp
- collect_scripts:
- ntp_installed_servers: |
- #!/bin/bash
- dpkg -l | grep ntp | wc -l
- ntp_conf_dist_servers: |
- #!/bin/bash
- ls /etc/ntp.conf.dist | wc -l
- ntp_conf_servers: |
- #!/bin/bash
- cat /etc/ntp.conf | grep '^server'
-
-There are several keys, 1 required and some optional, in the YAML file:
-
-1. The required key is ``cloud_config``. This should be a string of valid
- YAML that is exactly what would normally be placed in a cloud-config
- file, including the cloud-config header. This essentially sets up the
- scenario under test.
-
-2. One optional key is ``collect_scripts``. This key has one or more
- sub-keys containing strings of arbitrary commands to execute (e.g.
- ```cat /var/log/cloud-config-output.log```). In the example above the
- output of dpkg is captured, grep for ntp, and the number of lines
- reported. The name of the sub-key is important. The sub-key is used by
- the verification script to recall the output of the commands ran.
-
-3. The optional ``enabled`` key enables or disables the test case. By
- default the test case will be enabled.
-
-4. The optional ``required_features`` key may be used to specify a list
- of features flags that an image must have to be able to run the test
- case. For example, if a test case relies on an image supporting apt,
- then the config for the test case should include ``required_features:
- [ apt ]``.
-
-
-Default Collect Scripts
------------------------
-
-By default the following files will be collected for every test. There is
-no need to specify these items:
-
-* ``/var/log/cloud-init.log``
-* ``/var/log/cloud-init-output.log``
-* ``/run/cloud-init/.instance-id``
-* ``/run/cloud-init/result.json``
-* ``/run/cloud-init/status.json``
-* ```dpkg-query -W -f='${Version}' cloud-init```
-
-Verification
-------------
-
-The verification script is a Python file with unit tests like the one,
-`ntp_server.py`, below:
-
-.. code-block:: python
-
- # This file is part of cloud-init. See LICENSE file for license information.
-
- """cloud-init Integration Test Verify Script"""
- from tests.cloud_tests.testcases import base
-
-
- class TestNtp(base.CloudTestCase):
- """Test ntp module"""
-
- def test_ntp_installed(self):
- """Test ntp installed"""
- out = self.get_data_file('ntp_installed_empty')
- self.assertEqual(1, int(out))
-
- def test_ntp_dist_entries(self):
- """Test dist config file has one entry"""
- out = self.get_data_file('ntp_conf_dist_empty')
- self.assertEqual(1, int(out))
-
- def test_ntp_entires(self):
- """Test config entries"""
- out = self.get_data_file('ntp_conf_empty')
- self.assertIn('pool 0.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 1.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 2.ubuntu.pool.ntp.org iburst', out)
- self.assertIn('pool 3.ubuntu.pool.ntp.org iburst', out)
-
- # vi: ts=4 expandtab
-
-
-Here is a breakdown of the unit test file:
-
-* The import statement allows access to the output files.
-
-* The class can be named anything, but must import the
- ``base.CloudTestCase``, either directly or via another test class.
-
-* There can be 1 to N number of functions with any name, however only
- functions starting with ``test_*`` will be executed.
-
-* There can be 1 to N number of classes in a test module, however only
- classes inheriting from ``base.CloudTestCase`` will be loaded.
-
-* Output from the commands can be accessed via
- ``self.get_data_file('key')`` where key is the sub-key of
- ``collect_scripts`` above.
-
-* The cloud config that the test ran with can be accessed via
- ``self.cloud_config``, or any entry from the cloud config can be accessed
- via ``self.get_config_entry('key')``.
-
-* See the base ``CloudTestCase`` for additional helper functions.
-
-Layout
-------
-
-Integration tests are located under the `tests/cloud_tests` directory.
-Test configurations are placed under `configs` and the test verification
-scripts under `testcases`:
-
-.. code-block:: shell-session
-
- cloud-init$ tree -d tests/cloud_tests/
- tests/cloud_tests/
- ├── configs
- │   ├── bugs
- │   ├── examples
- │   ├── main
- │   └── modules
- └── testcases
- ├── bugs
- ├── examples
- ├── main
- └── modules
-
-The sub-folders of bugs, examples, main, and modules help organize the
-tests. View the README.md in each to understand in more detail each
-directory.
-
-Test Creation Helper
---------------------
-
-The integration testing suite has a built in helper to aid in test
-development. Help can be invoked via ``tox -e citest -- create --help``. It
-can create a template test case config file with user data passed in from
-the command line, as well as a template test case verifier module.
-
-The following would create a test case named ``example`` under the
-``modules`` category with the given description, and cloud config data read
-in from ``/tmp/user_data``.
-
-.. code-block:: shell-session
-
- $ tox -e citest -- create modules/example \
- -d "a simple example test case" -c "$(< /tmp/user_data)"
-
-
-Development Checklist
----------------------
-
-* Configuration File
- * Named 'your_test.yaml'
- * Contains at least a valid cloud-config
- * Optionally, commands to capture additional output
- * Valid YAML
- * Placed in the appropriate sub-folder in the configs directory
- * Any image features required for the test are specified
-* Verification File
- * Named 'your_test.py'
- * Valid unit tests validating output collected
- * Passes pylint & pep8 checks
- * Placed in the appropriate sub-folder in the test cases directory
-* Tested by running the test:
-
- .. code-block:: shell-session
-
- $ tox -e citest -- run -verbose \
- --os-name <release target> \
- --test modules/your_test.yaml \
- [--deb <build of cloud-init>]
-
-
-Platforms
-=========
-
-EC2
----
-To run on the EC2 platform it is required that the user has an AWS credentials
-configuration file specifying his or her access keys and a default region.
-These configuration files are the standard that the AWS cli and other AWS
-tools utilize for interacting directly with AWS itself and are normally
-generated when running ``aws configure``:
-
-.. code-block:: shell-session
-
- $ cat $HOME/.aws/credentials
- [default]
- aws_access_key_id = <KEY HERE>
- aws_secret_access_key = <KEY HERE>
-
-.. code-block:: shell-session
-
- $ cat $HOME/.aws/config
- [default]
- region = us-west-2
-
-
-Azure Cloud
------------
-
-To run on Azure Cloud platform users login with Service Principal and export
-credentials file. Region is defaulted and can be set in
-``tests/cloud_tests/platforms.yaml``. The Service Principal credentials are
-the standard authentication for Azure SDK to interact with Azure Services:
-
-Create Service Principal account or login
-
-.. code-block:: shell-session
-
- $ az ad sp create-for-rbac --name "APP_ID" --password "STRONG-SECRET-PASSWORD"
-
-.. code-block:: shell-session
-
- $ az login --service-principal --username "APP_ID" --password "STRONG-SECRET-PASSWORD"
-
-Export credentials
-
-.. code-block:: shell-session
-
- $ az ad sp create-for-rbac --sdk-auth > $HOME/.azure/credentials.json
-
-.. code-block:: json
-
- {
- "clientId": "<Service principal ID>",
- "clientSecret": "<Service principal secret/password>",
- "subscriptionId": "<Subscription associated with the service principal>",
- "tenantId": "<The service principal's tenant>",
- "activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
- "resourceManagerEndpointUrl": "https://management.azure.com/",
- "activeDirectoryGraphResourceId": "https://graph.windows.net/",
- "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
- "galleryEndpointUrl": "https://gallery.azure.com/",
- "managementEndpointUrl": "https://management.core.windows.net/"
- }
-
-Set region in platforms.yaml
-
-.. code-block:: yaml
-
- azurecloud:
- enabled: true
- region: West US 2
- vm_size: Standard_DS1_v2
- storage_sku: standard_lrs
- tag: ci
-
-
-Architecture
-============
-
-The following section outlines the high-level architecture of the
-integration process.
-
-Overview
---------
-The process flow during a complete end-to-end LXD-backed test.
-
-1. Configuration
- * The back end and specific distro releases are verified as supported
- * The test or tests that need to be run are determined either by
- directory or by individual yaml
-
-2. Image Creation
- * Acquire the request LXD image
- * Install the specified cloud-init package
- * Clean the image so that it does not appear to have been booted
- * A snapshot of the image is created and reused by all tests
-
-3. Configuration
- * For each test, the cloud-config is injected into a copy of the
- snapshot and booted
- * The framework waits for ``/var/lib/cloud/instance/boot-finished``
- (up to 120 seconds)
- * All default commands are ran and output collected
- * Any commands the user specified are executed and output collected
-
-4. Verification
- * The default commands are checked for any failures, errors, and
- warnings to validate basic functionality of cloud-init completed
- successfully
- * The user generated unit tests are then ran validating against the
- collected output
-
-5. Results
- * If any failures were detected the test suite returns a failure
- * Results can be dumped in yaml format to a specified file using the
- ``-r <result_file_name>.yaml`` option
-
-Configuring the Test Suite
---------------------------
-
-Most of the behavior of the test suite is configurable through several yaml
-files. These control the behavior of the test suite's platforms, images, and
-tests. The main config files for platforms, images and test cases are
-``platforms.yaml``, ``releases.yaml`` and ``testcases.yaml``.
-
-Config handling
-^^^^^^^^^^^^^^^
-
-All configurable parts of the test suite use a defaults + overrides system
-for managing config entries. All base config items are dictionaries.
-
-Merging is done on a key-by-key basis, with all keys in the default and
-override represented in the final result. If a key exists both in
-the defaults and the overrides, then the behavior depends on the type of data
-the key refers to. If it is atomic data or a list, then the overrides will
-replace the default. If the data is a dictionary then the value will be the
-result of merging that dictionary from the default config and that
-dictionary from the overrides.
-
-Merging is done using the function
-``tests.cloud_tests.config.merge_config``, which can be examined for more
-detail on config merging behavior.
-
-The following demonstrates merge behavior:
-
-.. code-block:: yaml
-
- defaults:
- list_item:
- - list_entry_1
- - list_entry_2
- int_item_1: 123
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: 2
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: b
-
- overrides:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- dict_item:
- subkey_2: false
- subkey_dict:
- subsubkey_2: 'new value'
-
- result:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: false
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: 'new value'
-
-
-Image Config
-------------
-
-Image configuration is handled in ``releases.yaml``. The image configuration
-controls how platforms locate and acquire images, how the platforms should
-interact with the images, how platforms should detect when an image has
-fully booted, any options that are required to set the image up, and
-features that the image supports.
-
-Since settings for locating an image and interacting with it differ from
-platform to platform, there are 4 levels of settings available for images on
-top of the default image settings. The structure of the image config file
-is:
-
-.. code-block:: yaml
-
- default_release_config:
- default:
- ...
- <platform>:
- ...
- <platform>:
- ...
-
- releases:
- <release name>:
- <default>:
- ...
- <platform>:
- ...
- <platform>:
- ...
-
-
-The base config is created from the overall defaults and the overrides for
-the platform. The overrides are created from the default config for the
-image and the platform specific overrides for the image.
-
-System Boot
-^^^^^^^^^^^
-
-The test suite must be able to test if a system has fully booted and if
-cloud-init has finished running, so that running collect scripts does not
-race against the target image booting. This is done using the
-``system_ready_script`` and ``cloud_init_ready_script`` image config keys.
-
-Each of these keys accepts a small bash test statement as a string that must
-return 0 or 1. Since this test statement will be added into a larger bash
-statement it must be a single statement using the ``[`` test syntax.
-
-The default image config provides a system ready script that works for any
-systemd based image. If the image is not systemd based, then a different
-test statement must be provided. The default config also provides a test
-for whether or not cloud-init has finished which checks for the file
-``/run/cloud-init/result.json``. This should be sufficient for most systems
-as writing this file is one of the last things cloud-init does.
-
-The setting ``boot_timeout`` controls how long, in seconds, the platform
-should wait for an image to boot. If the system ready script has not
-indicated that the system is fully booted within this time an error will be
-raised.
-
-Feature Flags
-^^^^^^^^^^^^^
-
-Not all test cases can work on all images due to features the test case
-requires not being present on that image. If a test case requires features
-in an image that are not likely to be present across all distros and
-platforms that the test suite supports, then the test can be skipped
-everywhere it is not supported.
-
-Feature flags, which are names for features supported on some images, but
-not all that may be required by test cases. Configuration for feature flags
-is provided in ``releases.yaml`` under the ``features`` top level key. The
-features config includes a list of all currently defined feature flags,
-their meanings, and a list of feature groups.
-
-Feature groups are groups of features that many images have in common. For
-example, the ``Ubuntu_specific`` feature group includes features that
-should be present across most Ubuntu releases, but may or may not be for
-other distros. Feature groups are specified for an image as a list under
-the key ``feature_groups``.
-
-An image's feature flags are derived from the features groups that that
-image has and any feature overrides provided. Feature overrides can be
-specified under the ``features`` key which accepts a dictionary of
-``{<feature_name>: true/false}`` mappings. If a feature is omitted from an
-image's feature flags or set to false in the overrides then the test suite
-will skip any tests that require that feature when using that image.
-
-Feature flags may be overridden at run time using the ``--feature-override``
-command line argument. It accepts a feature flag and value to set in the
-format ``<feature name>=true/false``. Multiple ``--feature-override``
-flags can be used, and will all be applied to all feature flags for images
-used during a test.
-
-Setup Overrides
-^^^^^^^^^^^^^^^
-
-If an image requires some of the options for image setup to be used, then it
-may specify overrides for the command line arguments passed into setup
-image. These may be specified as a dictionary under the ``setup_overrides``
-key. When an image is set up, the arguments that control how it is set up
-will be the arguments from the command line, with any entries in
-``setup_overrides`` used to override these arguments.
-
-For example, images that do not come with cloud-init already installed
-should have ``setup_overrides: {upgrade: true}`` specified so that in the
-event that no additional setup options are given, cloud-init will be
-installed from the image's repos before running tests. Note that if other
-options such as ``--deb`` are passed in on the command line, these will
-still work as expected, since apt's policy for cloud-init would prefer the
-locally installed deb over an older version from the repos.
-
-Platform Specific Options
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-There are many platform specific options in image configuration that allow
-platforms to locate images and that control additional setup that the
-platform may have to do to make the image usable. For information on how
-these work, please consult the documentation for that platform in the
-integration testing suite and the ``releases.yaml`` file for examples.
-
-Error Handling
---------------
-
-The test suite makes an attempt to run as many tests as possible even in the
-event of some failing so that automated runs collect as much data as
-possible. In the event that something goes wrong while setting up for or
-running a test, the test suite will attempt to continue running any tests
-which have not been affected by the error.
-
-For example, if the test suite was told to run tests on one platform for two
-releases and an error occurred setting up the first image, all tests for
-that image would be skipped, and the test suite would continue to set up
-the second image and run tests on it. Or, if the system does not start
-properly for one test case out of many to run on that image, that test case
-will be skipped and the next one will be run.
-
-Note that if any errors occur, the test suite will record the failure and
-where it occurred in the result data and write it out to the specified
-result file.
-
-Results
--------
-
-The test suite generates result data that includes how long each stage of
-the test suite took and which parts were and were not successful. This data
-is dumped to the log after the collect and verify stages, and may also be
-written out in yaml format to a file. If part of the setup failed, the
-traceback for the failure and the error message will be included in the
-result file. If a test verifier finds a problem with the collected data
-from a test run, the class, test function and test will be recorded in the
-result data.
-
-Exit Codes
-^^^^^^^^^^
-
-The test suite counts how many errors occur throughout a run. The exit code
-after a run is the number of errors that occurred. If the exit code is
-non-zero then something is wrong either with the test suite, the
-configuration for an image, a test case, or cloud-init itself.
-
-Note that the exit code does not always directly correspond to the number
-of failed test cases, since in some cases, a single error during image setup
-can mean that several test cases are not run. If run is used, then the exit
-code will be the sum of the number of errors in the collect and verify
-stages.
-
-Data Dir
-^^^^^^^^
-
-When using run, the collected data is written into a temporary directory. In
-the event that all tests pass, this directory is deleted, but if a test
-fails or an error occurs, this data will be left in place, and a message
-will be written to the log giving the location of the data.
diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/topics/code_review.rst
index 68c10405..33aad789 100644
--- a/doc/rtd/topics/code_review.rst
+++ b/doc/rtd/topics/code_review.rst
@@ -22,7 +22,7 @@ questions about the code review process, or at any point during the
code review process, these are the available avenues:
* if you have an open Pull Request, comment on that pull request
-* join the ``#cloud-init`` channel on the Freenode IRC network and ask
+* join the ``#cloud-init`` channel on the Libera IRC network and ask
away
* send an email to the cloud-init mailing list,
cloud-init@lists.launchpad.net
@@ -58,12 +58,12 @@ Reviewer
Committer
A cloud-init core developer (i.e. a person who has permission to
- merge PRs into master).
+ merge PRs into **main**).
Prerequisites For Landing Pull Requests
=======================================
-Before a PR can be landed into master, the following conditions *must*
+Before a PR can be landed into **main**, the following conditions *must*
be met:
* the CLA has been signed by the **Proposer** (or is covered by an
@@ -148,7 +148,7 @@ temporarily closed. (The first two are covered in this section; see
(In the below, when the verbs "merge" or "squash merge" are used, they
should be understood to mean "squash merged using the GitHub UI", which
-is the only way that changes can land in cloud-init's master branch.)
+is the only way that changes can land in cloud-init's **main** branch.)
These are the steps that comprise the review phase:
diff --git a/doc/rtd/topics/contributing.rst b/doc/rtd/topics/contributing.rst
new file mode 100644
index 00000000..b9aee867
--- /dev/null
+++ b/doc/rtd/topics/contributing.rst
@@ -0,0 +1,2 @@
+.. include:: ../../../CONTRIBUTING.rst
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 3d026143..f73a5b2a 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -4,9 +4,9 @@ Datasources
***********
Datasources are sources of configuration data for cloud-init that typically
-come from the user (e.g. userdata) or come from the cloud that created the
-configuration drive (e.g. metadata). Typical userdata would include files,
-yaml, and shell scripts while typical metadata would include server name,
+come from the user (i.e. userdata) or come from the cloud that created the
+configuration drive (i.e. metadata). Typical userdata would include files,
+YAML, and shell scripts while typical metadata would include server name,
instance id, display name and other cloud specific details.
Since there are multiple ways to provide this data (each cloud solution seems
@@ -29,16 +29,17 @@ The following is a list of documents for each supported datasource:
datasources/aliyun.rst
datasources/altcloud.rst
+ datasources/ec2.rst
datasources/azure.rst
datasources/cloudsigma.rst
datasources/cloudstack.rst
datasources/configdrive.rst
datasources/digitalocean.rst
datasources/e24cloud.rst
- datasources/ec2.rst
datasources/exoscale.rst
datasources/fallback.rst
datasources/gce.rst
+ datasources/lxd.rst
datasources/maas.rst
datasources/nocloud.rst
datasources/opennebula.rst
@@ -47,9 +48,11 @@ The following is a list of documents for each supported datasource:
datasources/ovf.rst
datasources/rbxcloud.rst
datasources/smartos.rst
+ datasources/upcloud.rst
+ datasources/vmware.rst
+ datasources/vultr.rst
datasources/zstack.rst
-
Creation
========
@@ -96,7 +99,7 @@ need to take care of the following items:
your datasource module name to the end of the ``datasource_list``
entry in ``cloudinit/settings.py``.
-* **Add your your cloud platform to apport collection prompts:** Update the
+* **Add your cloud platform to apport collection prompts:** Update the
list of cloud platforms in ``cloudinit/apport.py``. This list will be
provided to the user who invokes ``ubuntu-bug cloud-init``.
diff --git a/doc/rtd/topics/datasources/aliyun.rst b/doc/rtd/topics/datasources/aliyun.rst
index 3f4f40ca..0bb9c19e 100644
--- a/doc/rtd/topics/datasources/aliyun.rst
+++ b/doc/rtd/topics/datasources/aliyun.rst
@@ -12,6 +12,21 @@ The Alibaba Cloud metadata service is available at the well known url
Alibaba Cloud ECS on `metadata
<https://www.alibabacloud.com/help/zh/faq-detail/49122.htm>`__.
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+ datasource:
+ AliYun:
+ metadata_urls: ["http://100.100.100.200"]
+ timeout: 50
+ max_wait: 120
+
Versions
^^^^^^^^
Like the EC2 metadata service, Alibaba Cloud's metadata service provides
@@ -71,4 +86,4 @@ If no user-data is provided, this will return a 404.
#!/bin/sh
echo "Hello World."
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/altcloud.rst b/doc/rtd/topics/datasources/altcloud.rst
index 9d7e3de1..acd5e2a3 100644
--- a/doc/rtd/topics/datasources/altcloud.rst
+++ b/doc/rtd/topics/datasources/altcloud.rst
@@ -91,4 +91,4 @@ For more information on Delta Cloud see: http://deltacloud.apache.org
.. _RHEVm: https://www.redhat.com/virtualization/rhev/desktop/rhevm/
.. _vSphere: https://www.vmware.com/products/datacenter-virtualization/vsphere/overview.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index e04c3a33..1bd03970 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -5,28 +5,6 @@ Azure
This datasource finds metadata and user-data from the Azure cloud platform.
-walinuxagent
-------------
-walinuxagent has several functions within images. For cloud-init
-specifically, the relevant functionality it performs is to register the
-instance with the Azure cloud platform at boot so networking will be
-permitted. For more information about the other functionality of
-walinuxagent, see `Azure's documentation
-<https://github.com/Azure/WALinuxAgent#introduction>`_ for more details.
-(Note, however, that only one of walinuxagent's provisioning and cloud-init
-should be used to perform instance customisation.)
-
-If you are configuring walinuxagent yourself, you will want to ensure that you
-have `Provisioning.UseCloudInit
-<https://github.com/Azure/WALinuxAgent#provisioningusecloudinit>`_ set to
-``y``.
-
-
-Builtin Agent
--------------
-An alternative to using walinuxagent to register to the Azure cloud platform
-is to use the ``__builtin__`` agent command. This section contains more
-background on what that code path does, and how to enable it.
The Azure cloud platform provides initial data to an instance via an attached
CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
@@ -41,16 +19,6 @@ by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in
'dhclient_hook' of cloud-init itself. This sub-command will write the client
information in json format to /run/cloud-init/dhclient.hook/<interface>.json.
-In order for cloud-init to leverage this method to find the endpoint, the
-cloud.cfg file must contain:
-
-.. sourcecode:: yaml
-
- datasource:
- Azure:
- set_hostname: False
- agent_command: __builtin__
-
If those files are not available, the fallback is to check the leases file
for the endpoint server (again option 245).
@@ -83,9 +51,6 @@ configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
The settings that may be configured are:
- * **agent_command**: Either __builtin__ (default) or a command to run to getcw
- metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the
- provided command to obtain metadata.
* **apply_network_config**: Boolean set to True to use network configuration
described by Azure's IMDS endpoint instead of fallback network config of
dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is
@@ -95,20 +60,6 @@ The settings that may be configured are:
custom DHCP option 245 from Azure fabric.
* **disk_aliases**: A dictionary defining which device paths should be
interpreted as ephemeral images. See cc_disk_setup module for more info.
- * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to
- metadata changes. The '``hostname_bounce: command``' entry can be either
- the literal string 'builtin' or a command to execute. The command will be
- invoked after the hostname is set, and will have the 'interface' in its
- environment. If ``set_hostname`` is not true, then ``hostname_bounce``
- will be ignored. An example might be:
-
- ``command: ["sh", "-c", "killall dhclient; dhclient $interface"]``
-
- * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to
- metadata changes. Azure will throttle ifup/down in some cases after metadata
- has been updated to inform dhcp server about updated hostnames.
- * **set_hostname**: Boolean set to True when we want Azure to set the hostname
- based on metadata.
Configuration for the datasource can also be read from a
``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. Content in
@@ -121,18 +72,11 @@ An example configuration with the default values is provided below:
datasource:
Azure:
- agent_command: __builtin__
apply_network_config: true
data_dir: /var/lib/waagent
dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
disk_aliases:
ephemeral0: /dev/disk/cloud/azure_resource
- hostname_bounce:
- interface: eth0
- command: builtin
- policy: true
- hostname_command: hostname
- set_hostname: true
Userdata
@@ -144,9 +88,7 @@ child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``)
If both ``UserData`` and ``CustomData`` are provided behavior is undefined on
which will be selected.
-In the example below, user-data provided is 'this is my userdata', and the
-datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``.
-That agent command will take affect as if it were specified in system config.
+In the example below, the user-data provided is 'this is my userdata'.
Example:
@@ -184,21 +126,17 @@ The hostname is provided to the instance in the ovf-env.xml file as
Whatever value the instance provides in its dhcp request will resolve in the
domain returned in the 'search' request.
-The interesting issue is that a generic image will already have a hostname
-configured. The ubuntu cloud images have 'ubuntu' as the hostname of the
-system, and the initial dhcp request on eth0 is not guaranteed to occur after
-the datasource code has been run. So, on first boot, that initial value will
-be sent in the dhcp request and *that* value will resolve.
-
-In order to make the ``HostName`` provided in the ovf-env.xml resolve, a
-dhcp request must be made with the new value. Walinuxagent (in its current
-version) handles this by polling the state of hostname and bouncing ('``ifdown
-eth0; ifup eth0``' the network interface if it sees that a change has been
-made.
+A generic image will already have a hostname configured. The ubuntu
+cloud images have 'ubuntu' as the hostname of the system, and the
+initial dhcp request on eth0 is not guaranteed to occur after the
+datasource code has been run. So, on first boot, that initial value
+will be sent in the dhcp request and *that* value will resolve.
-cloud-init handles this by setting the hostname in the DataSource's 'get_data'
-method via '``hostname $HostName``', and then bouncing the interface. This
+In order to make the ``HostName`` provided in the ovf-env.xml resolve,
+a dhcp request must be made with the new value. cloud-init handles
+this by setting the hostname in the DataSource's 'get_data' method via
+'``hostname $HostName``', and then bouncing the interface. This
behavior can be configured or disabled in the datasource config. See
'Configuration' above.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/cloudsigma.rst b/doc/rtd/topics/datasources/cloudsigma.rst
index 86b834c8..dee665a4 100644
--- a/doc/rtd/topics/datasources/cloudsigma.rst
+++ b/doc/rtd/topics/datasources/cloudsigma.rst
@@ -39,4 +39,4 @@ value. If this field does not exist the default value is "net".
.. _server context: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
.. _meta field: http://cloudsigma-docs.readthedocs.org/en/latest/meta.html
.. _config formats: http://cloudinit.readthedocs.org/en/latest/topics/format.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index a24de34f..e889ab6e 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -46,11 +46,9 @@ An example configuration with the default values is provided below:
CloudStack:
max_wait: 120
timeout: 50
- datasource_list:
- - CloudStack
.. _Apache CloudStack: http://cloudstack.apache.org/
.. _CloudStack Administrator Guide: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/virtual_machines.html#user-data-and-meta-data
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/configdrive.rst b/doc/rtd/topics/datasources/configdrive.rst
index 4fcbccee..ecc37df6 100644
--- a/doc/rtd/topics/datasources/configdrive.rst
+++ b/doc/rtd/topics/datasources/configdrive.rst
@@ -128,4 +128,4 @@ what all can be present here.
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
.. _the config drive extension: https://docs.openstack.org/nova/latest/admin/config-drive.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst
index 88f1e5f5..801841c1 100644
--- a/doc/rtd/topics/datasources/digitalocean.rst
+++ b/doc/rtd/topics/datasources/digitalocean.rst
@@ -1,7 +1,7 @@
.. _datasource_digital_ocean:
-Digital Ocean
-=============
+DigitalOcean
+============
The `DigitalOcean`_ datasource consumes the content served from DigitalOcean's
`metadata service`_. This metadata service serves information about the
@@ -29,4 +29,4 @@ DigitalOcean's datasource can be configured as follows:
.. _metadata service: https://developers.digitalocean.com/metadata/
.. _Full documentation: https://developers.digitalocean.com/metadata/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/e24cloud.rst b/doc/rtd/topics/datasources/e24cloud.rst
index de9a4127..2af6634b 100644
--- a/doc/rtd/topics/datasources/e24cloud.rst
+++ b/doc/rtd/topics/datasources/e24cloud.rst
@@ -2,8 +2,8 @@
E24Cloud
========
-`E24Cloud <https://www.e24cloud.com/en/>` platform provides an AWS Ec2 metadata
+`E24Cloud <https://www.e24cloud.com/en/>`_ platform provides an AWS Ec2 metadata
service clone. It identifies itself to guests using the dmi
system-manufacturer (/sys/class/dmi/id/sys_vendor).
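+
+A quick way to confirm this from inside a guest is to read the DMI field
+directly (a debugging sketch, not required for normal operation):
+
+.. code-block:: shell-session
+
+   $ cat /sys/class/dmi/id/sys_vendor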
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index 274ca1e4..94e4158d 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -121,4 +121,4 @@ Notes
For example: the primary NIC will have a DHCP route-metric of 100,
the next NIC will be 200.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/fallback.rst b/doc/rtd/topics/datasources/fallback.rst
index 2b133fcd..03658f54 100644
--- a/doc/rtd/topics/datasources/fallback.rst
+++ b/doc/rtd/topics/datasources/fallback.rst
@@ -15,4 +15,4 @@ will be so that the user is not left with an inaccessible instance.
**Note:** the instance id that this datasource provides is
``iid-datasource-none``.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/gce.rst b/doc/rtd/topics/datasources/gce.rst
index 8406695c..70aefea2 100644
--- a/doc/rtd/topics/datasources/gce.rst
+++ b/doc/rtd/topics/datasources/gce.rst
@@ -15,6 +15,28 @@ to provide ``public-keys``.
``user-data`` and ``user-data-encoding`` can be provided to cloud-init by
setting those custom metadata keys for an *instance*.
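+
+As an illustrative sketch (the instance name and file name are hypothetical),
+the ``user-data`` key can be set with the ``gcloud`` CLI when creating an
+instance:
+
+.. code-block:: shell-session
+
+   $ gcloud compute instances create my-instance \
+       --metadata-from-file user-data=my-cloud-config.yaml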
+Configuration
+-------------
+The following configuration can be set for the datasource in system
+configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``).
+
+The settings that may be configured are:
+
+ * **retries**: The number of retries that should be done for an http request.
+ This value is used only after metadata_url is selected. (default: 5)
+ * **sec_between_retries**: The amount of wait time between the retries when
+ crawling the metadata service. (default: 1)
+
+
+An example configuration with the default values is provided below:
+
+.. sourcecode:: yaml
+
+ datasource:
+ GCE:
+ retries: 5
+ sec_between_retries: 1
+
.. _GCE metadata docs: https://cloud.google.com/compute/docs/storing-retrieving-metadata#querying
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/lxd.rst b/doc/rtd/topics/datasources/lxd.rst
new file mode 100644
index 00000000..fa2dcf5d
--- /dev/null
+++ b/doc/rtd/topics/datasources/lxd.rst
@@ -0,0 +1,65 @@
+.. _datasource_lxd:
+
+LXD
+===
+
+The data source ``LXD`` allows the user to provide custom user-data,
+vendor-data, meta-data and network-config to the instance without running
+a network service (or even without having a network at all). This datasource
+performs HTTP GETs against the `LXD socket device`_ which is provided to each
+running LXD container and VM as ``/dev/lxd/sock`` and represents all
+instance-metadata as versioned HTTP routes such as:
+
+ - 1.0/meta-data
+ - 1.0/config/user.meta-data
+ - 1.0/config/user.vendor-data
+ - 1.0/config/user.user-data
+ - 1.0/config/user.<any-custom-key>
+
+The LXD socket device ``/dev/lxd/sock`` is only present on containers and VMs
+when the instance configuration has ``security.devlxd=true`` (default).
+Disabling the ``security.devlxd`` configuration setting at initial launch will
+ensure that cloud-init uses the :ref:`datasource_nocloud` datasource.
+Disabling ``security.devlxd`` over the life of the container will result in
+warnings from cloud-init, and cloud-init will keep the originally detected LXD
+datasource.
+
+The LXD datasource provides cloud-init the opportunity to react to meta-data,
+vendor-data, user-data and network-config changes and render the updated
+configuration across a system reboot.
+
+One can manipulate what meta-data, vendor-data or user-data is provided to
+the launched container using LXD profiles or
+``lxc launch ... -c <key>="<value>"`` at initial container launch, using one
+of the following keys (an example launch command follows the list):
+
+ - user.meta-data: YAML metadata which will be appended to base meta-data
+ - user.vendor-data: YAML which overrides any meta-data values
+ - user.network-config: YAML representing either :ref:`network_config_v1` or
+ :ref:`network_config_v2` format
+ - user.user-data: YAML which takes preference and overrides both meta-data
+ and vendor-data values
+ - user.any-key: Custom user configuration key and value pairs can be passed
+   to cloud-init. Those keys/values will be present in instance-data which can
+   be used by both ``## template: jinja`` #cloud-config templates and
+   the ``cloud-init query`` command.
+
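+An illustrative launch command passing two of these keys (the image alias,
+instance name and file names are placeholders):
+
+.. code-block:: shell-session
+
+   $ lxc launch ubuntu:focal my-instance \
+       -c user.user-data="$(cat userdata.yaml)" \
+       -c user.network-config="$(cat network-config.yaml)"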
+
+By default, network configuration from this datasource will be:
+
+.. code:: yaml
+
+ version: 1
+ config:
+ - type: physical
+ name: eth0
+ subnets:
+ - type: dhcp
+ control: auto
+
+This datasource is intended to replace the :ref:`datasource_nocloud`
+datasource for LXD instances with more direct support for LXD APIs instead
+of static NoCloud seed files.
+
+.. _LXD socket device: https://linuxcontainers.org/lxd/docs/master/dev-lxd
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index 0ca79102..d31f5d0f 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -50,7 +50,9 @@ These user-data and meta-data files are expected to be in the following format.
/user-data
/meta-data
-Basically, user-data is simply user-data and meta-data is a yaml formatted file
+Both files are required to be present for it to be considered a valid seed ISO.
+
+Basically, user-data is simply user-data and meta-data is a YAML formatted file
representing what you'd find in the EC2 metadata service.
You may also optionally provide a vendor-data file in the following format.
@@ -113,11 +115,11 @@ Example metadata:
Network configuration can also be provided to cloud-init in either
:ref:`network_config_v1` or :ref:`network_config_v2` by providing that
-yaml formatted data in a file named ``network-config``. If found,
+YAML formatted data in a file named ``network-config``. If found,
this file will override a ``network-interfaces`` file.
See an example below. Note specifically that this file does not
-have a top level ``network`` key as it it is already assumed to
+have a top level ``network`` key as it is already assumed to
be network configuration based on the filename.
.. code:: yaml
@@ -149,4 +151,4 @@ be network configuration based on the filename.
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/opennebula.rst b/doc/rtd/topics/datasources/opennebula.rst
index 350a3e93..65570a53 100644
--- a/doc/rtd/topics/datasources/opennebula.rst
+++ b/doc/rtd/topics/datasources/opennebula.rst
@@ -69,13 +69,21 @@ Datasource mode configuration override. Values: local, net, disabled.
ETH<x>_NETWORK
ETH<x>_MASK
ETH<x>_GATEWAY
+ ETH<x>_GATEWAY6
ETH<x>_DOMAIN
ETH<x>_DNS
+ ETH<x>_SEARCH_DOMAIN
+ ETH<x>_MTU
+ ETH<x>_IP6
+ ETH<x>_IP6_ULA
+ ETH<x>_IP6_PREFIX_LENGTH
+ ETH<x>_IP6_GATEWAY
Static `network configuration`_.
::
+ SET_HOSTNAME
HOSTNAME
Instance hostname.
@@ -145,4 +153,4 @@ Example VM's context section
.. _contextualizing VMs: http://opennebula.org/documentation:documentation:cong
.. _network configuration: http://opennebula.org/documentation:documentation:cong#network_configuration
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index b23b4b7c..f523c142 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -12,10 +12,10 @@ Discovery
To determine whether a platform looks like it may be OpenStack, cloud-init
checks the following environment attributes as a potential OpenStack platform:
- * Maybe OpenStack if
+ * Maybe OpenStack if:
* **non-x86 cpu architecture**: because DMI data is buggy on some arches
- * Is OpenStack **if x86 architecture and ANY** of the following
+ * Is OpenStack **if x86 architecture and ANY** of the following:
* **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
* **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
@@ -32,7 +32,7 @@ The settings that may be configured are:
* **metadata_urls**: This list of urls will be searched for an OpenStack
metadata service. The first entry that successfully returns a 200 response
- for <url>/openstack will be selected. (default: ['http://169.254.169.254']).
+ for <url>/openstack will be selected. (default: ['http://169.254.169.254'])
* **max_wait**: the maximum amount of clock time in seconds that should be
spent searching metadata_urls. A value less than zero will result in only
one request being made, to the first in the list. (default: -1)
@@ -82,4 +82,12 @@ For more general information about how cloud-init handles vendor data,
including how it can be disabled by users on instances, see
:doc:`/topics/vendordata`.
-.. vi: textwidth=78
+OpenStack can also be configured to provide 'dynamic vendordata',
+which is served by the DynamicJSON provider and appears under a
+different metadata path, /vendor_data2.json.
+
+Cloud-init will look for a ``cloud-init`` entry at the vendor_data2 path; if
+found, these settings are applied after (and hence override) the settings from
+static vendor data. Both sets of vendor data can be overridden by user data.
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/oracle.rst b/doc/rtd/topics/datasources/oracle.rst
index 98c4657c..7e480021 100644
--- a/doc/rtd/topics/datasources/oracle.rst
+++ b/doc/rtd/topics/datasources/oracle.rst
@@ -46,4 +46,4 @@ An example configuration with the default values is provided below:
configure_secondary_nics: false
.. _Oracle Compute Infrastructure: https://cloud.oracle.com/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
index 6256e624..d6eb75da 100644
--- a/doc/rtd/topics/datasources/ovf.rst
+++ b/doc/rtd/topics/datasources/ovf.rst
@@ -13,6 +13,19 @@ source code tree in doc/sources/ovf
Configuration
-------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * disable_vmware_customization: disable or enable the VMware customization
+   based on VMware customization files. (default: True)
+ * allow_raw_data: enable or disable the VMware customization based on raw
+   cloud-init data including metadata and userdata. (default: True)
+ * vmware_cust_file_max_wait: the maximum amount of clock time in seconds that
+   should be spent waiting for VMware customization files. (default: 15)
+
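+A sketch of one possible layout for these settings in
+``/etc/cloud/cloud.cfg.d/`` is shown below. The placement assumed here
+(``disable_vmware_customization`` at the top level, the other two keys under
+the ``OVF`` datasource entry) should be verified against the cloud-init
+version in use:
+
+.. code-block:: yaml
+
+   # Assumed key placement -- verify for your cloud-init version
+   disable_vmware_customization: true
+   datasource:
+     OVF:
+       allow_raw_data: true
+       vmware_cust_file_max_wait: 15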
+
On VMware platforms, VMTools use is required for OVF datasource configuration
settings as well as vCloud and vSphere admin configuration. User could change
the VMTools configuration options with command::
@@ -26,8 +39,8 @@ The following VMTools configuration options affect cloud-init's behavior on a bo
change this default behavior (for example: enabled by default) via
customization specification settings.
-VMWare admin can refer to (https://github.com/canonical/cloud-init/blob/master/cloudinit/sources/helpers/vmware/imc/config.py) and set the customization specification settings.
+VMware admins can refer to the `customization config helper <https://github.com/canonical/cloud-init/blob/main/cloudinit/sources/helpers/vmware/imc/config.py>`_ and set the customization specification settings.
-For more information, see [VMware vSphere Product Documentation](https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html) and specific VMTools parameters consumed.
+For more information, see `VMware vSphere Product Documentation <https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html>`_ and specific VMTools parameters consumed.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst
index 52ec02ff..c4b3f2d0 100644
--- a/doc/rtd/topics/datasources/rbxcloud.rst
+++ b/doc/rtd/topics/datasources/rbxcloud.rst
@@ -22,4 +22,4 @@ is restarted, if the partition exists. For more information see
.. _HyperOne Virtual Machine docs: http://www.hyperone.com/
.. _FAT: https://en.wikipedia.org/wiki/File_Allocation_Table
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/smartos.rst b/doc/rtd/topics/datasources/smartos.rst
index be11dfbb..55604ffb 100644
--- a/doc/rtd/topics/datasources/smartos.rst
+++ b/doc/rtd/topics/datasources/smartos.rst
@@ -13,7 +13,7 @@ SmartOS Platform
The SmartOS virtualization platform uses meta-data to the instance via the
second serial console. On Linux, this is /dev/ttyS1. The data is a provided
via a simple protocol: something queries for the data, the console responds
-responds with the status and if "SUCCESS" returns until a single ".\n".
+with the status and, if "SUCCESS", returns data until a single ".\n".
New versions of the SmartOS tooling will include support for base64 encoded
data.
@@ -165,4 +165,4 @@ You can control the disk_setup then in 2 ways:
See doc/examples/cloud-config-disk-setup.txt for information on disk_setup.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/upcloud.rst b/doc/rtd/topics/datasources/upcloud.rst
new file mode 100644
index 00000000..75f438ee
--- /dev/null
+++ b/doc/rtd/topics/datasources/upcloud.rst
@@ -0,0 +1,24 @@
+.. _datasource_upcloud:
+
+UpCloud
+=======
+
+The `UpCloud`_ datasource consumes information from UpCloud's `metadata
+service`_. This metadata service serves information about the
+running server via HTTP over the address 169.254.169.254 available in every
+DHCP-configured interface. The metadata API endpoints are fully described in
+UpCloud API documentation at
+`https://developers.upcloud.com/1.3/8-servers/#metadata-service
+<https://developers.upcloud.com/1.3/8-servers/#metadata-service>`_.
+
+Providing user-data
+-------------------
+
+When creating a server, user-data is provided by specifying it as `user_data`
+in the API or via the server creation tool in the control panel. User-data is
+immutable during the server's lifetime and can be removed by deleting the
+server.
+
+.. _UpCloud: https://upcloud.com/
+.. _metadata service: https://upcloud.com/community/tutorials/upcloud-metadata-service/
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst
new file mode 100644
index 00000000..f1f48117
--- /dev/null
+++ b/doc/rtd/topics/datasources/vmware.rst
@@ -0,0 +1,358 @@
+.. _datasource_vmware:
+
+VMware
+======
+
+This datasource is for use with systems running on a VMware platform such as
+vSphere and currently supports the following data transports:
+
+
+* `GuestInfo <https://github.com/vmware/govmomi/blob/master/govc/USAGE.md#vmchange>`_ keys
+
+Configuration
+-------------
+
+The configuration method is dependent upon the transport:
+
+GuestInfo Keys
+^^^^^^^^^^^^^^
+
+One method of providing meta, user, and vendor data is by setting the following
+key/value pairs on a VM's ``extraConfig`` `property <https://vdc-repo.vmware.com/vmwb-repository/dcr-public/723e7f8b-4f21-448b-a830-5f22fd931b01/5a8257bd-7f41-4423-9a73-03307535bd42/doc/vim.vm.ConfigInfo.html>`_:
+
+.. list-table::
+ :header-rows: 1
+
+ * - Property
+ - Description
+ * - ``guestinfo.metadata``
+ - A YAML or JSON document containing the cloud-init metadata.
+ * - ``guestinfo.metadata.encoding``
+ - The encoding type for ``guestinfo.metadata``.
+ * - ``guestinfo.userdata``
+ - A YAML document containing the cloud-init user data.
+ * - ``guestinfo.userdata.encoding``
+ - The encoding type for ``guestinfo.userdata``.
+ * - ``guestinfo.vendordata``
+ - A YAML document containing the cloud-init vendor data.
+ * - ``guestinfo.vendordata.encoding``
+ - The encoding type for ``guestinfo.vendordata``.
+
+
+All ``guestinfo.*.encoding`` values may be set to ``base64`` or
+``gzip+base64``.
+
+Features
+--------
+
+This section reviews several features available in this datasource, regardless
+of how the meta, user, and vendor data was discovered.
+
+Instance data and lazy networks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+One of the hallmarks of cloud-init is `its use of instance-data and JINJA
+queries <../instancedata.html#using-instance-data>`_
+-- the ability to write queries in user and vendor data that reference runtime
+information present in ``/run/cloud-init/instance-data.json``. This works well
+when the metadata provides all of the information up front, such as the network
+configuration. For systems that rely on DHCP, however, this information may not
+be available when the metadata is persisted to disk.
+
+This datasource ensures that even if the instance is using DHCP to configure
+networking, the same details about the configured network are available in
+``/run/cloud-init/instance-data.json`` as if static networking was used. This
+information collected at runtime is easy to demonstrate by executing the
+datasource on the command line. From the root of this repository, run the
+following command:
+
+.. code-block:: bash
+
+ PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py
+
+The above command will result in output similar to the below JSON:
+
+.. code-block:: json
+
+ {
+ "hostname": "akutz.localhost",
+ "local-hostname": "akutz.localhost",
+ "local-ipv4": "192.168.0.188",
+ "local_hostname": "akutz.localhost",
+ "network": {
+ "config": {
+ "dhcp": true
+ },
+ "interfaces": {
+ "by-ipv4": {
+ "172.0.0.2": {
+ "netmask": "255.255.255.255",
+ "peer": "172.0.0.2"
+ },
+ "192.168.0.188": {
+ "broadcast": "192.168.0.255",
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "255.255.255.0"
+ }
+ },
+ "by-ipv6": {
+ "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": {
+ "flags": 208,
+ "mac": "64:4b:f0:18:9a:21",
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ },
+ "by-mac": {
+ "64:4b:f0:18:9a:21": {
+ "ipv4": [
+ {
+ "addr": "192.168.0.188",
+ "broadcast": "192.168.0.255",
+ "netmask": "255.255.255.0"
+ }
+ ],
+ "ipv6": [
+ {
+ "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2",
+ "flags": 208,
+ "netmask": "ffff:ffff:ffff:ffff::/64"
+ }
+ ]
+ },
+ "ac:de:48:00:11:22": {
+ "ipv6": []
+ }
+ }
+ }
+ },
+ "wait-on-network": {
+ "ipv4": true,
+ "ipv6": "false"
+ }
+ }
+
+
+Redacting sensitive information
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes the cloud-init userdata might contain sensitive information, and it
+may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo
+keys) redacted as soon as its data is read by the datasource. This is possible
+by adding the following to the metadata:
+
+.. code-block:: yaml
+
+ redact: # formerly named cleanup-guestinfo, which will also work
+ - userdata
+ - vendordata
+
+When the above snippet is added to the metadata, the datasource will iterate
+over the elements in the ``redact`` array and clear each of the keys. For
+example, when the guestinfo transport is used, the above snippet will cause
+the following commands to be executed:
+
+.. code-block:: shell
+
+ vmware-rpctool "info-set guestinfo.userdata ---"
+ vmware-rpctool "info-set guestinfo.userdata.encoding "
+ vmware-rpctool "info-set guestinfo.vendordata ---"
+ vmware-rpctool "info-set guestinfo.vendordata.encoding "
+
+Please note that keys are set to the valid YAML string ``---`` as it is not
+possible to remove an existing key from the guestinfo key-space. A key's
+analogous encoding property will be set to a single white-space character,
+causing the datasource to treat the actual key value as plain-text, thereby
+loading it as an empty YAML doc (hence the aforementioned ``---``\ ).
+
+Reading the local IP addresses
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This datasource automatically discovers the local IPv4 and IPv6 addresses for
+a guest operating system based on the default routes. However, when inspecting
+a VM externally, it's not possible to know what the *default* IP address is for
+the guest OS. That's why this datasource sets the discovered, local IPv4 and
+IPv6 addresses back in the guestinfo namespace as the following keys:
+
+
+* ``guestinfo.local-ipv4``
+* ``guestinfo.local-ipv6``
+
+It is possible that a host may not have any default, local IP addresses. It's
+also possible the reported, local addresses are link-local addresses. But these
+two keys may be used to discover what this datasource determined were the local
+IPv4 and IPv6 addresses for a host.
+
+Waiting on the network
+^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes cloud-init may bring up the network, but it will not finish coming
+online before the datasource's ``setup`` function is called, resulting in a
+``/var/run/cloud-init/instance-data.json`` file that does not have the correct
+network information. It is possible to instruct the datasource to wait until an
+IPv4 or IPv6 address is available before writing the instance data with the
+following metadata properties:
+
+.. code-block:: yaml
+
+ wait-on-network:
+ ipv4: true
+ ipv6: true
+
+If either of the above values is true, then the datasource will sleep for a
+second, check the network status, and repeat until one or both addresses from
+the specified families are available.
+
+Walkthrough
+-----------
+
+The following series of steps demonstrates how to configure a VM with
+this datasource:
+
+
+#. Create the metadata file for the VM. Save the following YAML to a file named
+ ``metadata.yaml``\ :
+
+ .. code-block:: yaml
+
+ instance-id: cloud-vm
+ local-hostname: cloud-vm
+ network:
+ version: 2
+ ethernets:
+ nics:
+ match:
+ name: ens*
+ dhcp4: yes
+
+#. Create the userdata file ``userdata.yaml``\ :
+
+ .. code-block:: yaml
+
+ #cloud-config
+
+ users:
+ - default
+ - name: akutz
+ primary_group: akutz
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ groups: sudo, wheel
+ lock_passwd: true
+ ssh_authorized_keys:
+ - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com
+
+#. Please note this step requires that the VM be powered off. All of the
+ commands below use the VMware CLI tool, `govc <https://github.com/vmware/govmomi/blob/master/govc>`_.
+
+ Go ahead and assign the path to the VM to the environment variable ``VM``\ :
+
+ .. code-block:: shell
+
+ export VM="/inventory/path/to/the/vm"
+
+#. Power off the VM:
+
+ .. raw:: html
+
+ <hr />
+
+ &#x26a0;&#xfe0f; <strong>First Boot Mode</strong>
+
+ To ensure the next power-on operation results in a first-boot scenario for
+ cloud-init, it may be necessary to run the following command just before
+ powering off the VM:
+
+ .. code-block:: bash
+
+ cloud-init clean
+
+ Otherwise cloud-init may not run in first-boot mode. For more information
+ on how the boot mode is determined, please see the
+ `First Boot Documentation <../boot.html#first-boot-determination>`_.
+
+ .. raw:: html
+
+ <hr />
+
+ .. code-block:: shell
+
+ govc vm.power -off "${VM}"
+
+#.
+ Export the environment variables that contain the cloud-init metadata and
+ userdata:
+
+ .. code-block:: shell
+
+ export METADATA=$(gzip -c9 <metadata.yaml | { base64 -w0 2>/dev/null || base64; }) \
+ USERDATA=$(gzip -c9 <userdata.yaml | { base64 -w0 2>/dev/null || base64; })
+
+#.
+ Assign the metadata and userdata to the VM:
+
+ .. code-block:: shell
+
+ govc vm.change -vm "${VM}" \
+ -e guestinfo.metadata="${METADATA}" \
+ -e guestinfo.metadata.encoding="gzip+base64" \
+ -e guestinfo.userdata="${USERDATA}" \
+ -e guestinfo.userdata.encoding="gzip+base64"
+
+ Please note the above commands include specifying the encoding for the
+ properties. This is important as it informs the datasource how to decode
+ the data for cloud-init. Valid values for ``metadata.encoding`` and
+ ``userdata.encoding`` include:
+
+
+ * ``base64``
+ * ``gzip+base64``
+
+#.
+ Power on the VM:
+
+ .. code-block:: shell
+
+ govc vm.power -vm "${VM}" -on
+
+If all went according to plan, the VM will be:
+
+* Locked down, allowing SSH access only for the user in the userdata
+* Configured for a dynamic IP address via DHCP
+* Configured with the hostname ``cloud-vm``
+
+Examples
+--------
+
+This section reviews common configurations:
+
+Setting the hostname
+^^^^^^^^^^^^^^^^^^^^
+
+The hostname is set by way of the metadata key ``local-hostname``.
+
+Setting the instance ID
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The instance ID may be set by way of the metadata key ``instance-id``. However,
+if this value is absent then the instance ID is read from the file
+``/sys/class/dmi/id/product_uuid``.
+
+Providing public SSH keys
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The public SSH keys may be set by way of the metadata key ``public-keys-data``.
+Each newline-terminated string will be interpreted as a separate SSH public
+key, which will be placed in the distro's default user's
+``~/.ssh/authorized_keys``. If the value is empty or absent, then nothing will
+be written to ``~/.ssh/authorized_keys``.
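+
+A minimal sketch of such a metadata snippet (the key material is truncated and
+the user names are placeholders):
+
+.. code-block:: yaml
+
+   public-keys-data: |
+     ssh-rsa AAAAB3Nza... alice@example.com
+     ssh-ed25519 AAAAC3Nza... bob@example.com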
+
+Configuring the network
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The network is configured by setting the metadata key ``network`` with a value
+consistent with Network Config Versions
+`1 <../network-config-format-v1.html>`_ or
+`2 <../network-config-format-v2.html>`_\ , depending on the Linux
+distro's version of cloud-init.
+
+The metadata key ``network.encoding`` may be used to indicate the format of
+the metadata key "network". Valid encodings are ``base64`` and ``gzip+base64``.
diff --git a/doc/rtd/topics/datasources/vultr.rst b/doc/rtd/topics/datasources/vultr.rst
new file mode 100644
index 00000000..f8601700
--- /dev/null
+++ b/doc/rtd/topics/datasources/vultr.rst
@@ -0,0 +1,35 @@
+.. _datasource_vultr:
+
+Vultr
+=====
+
+The `Vultr`_ datasource retrieves basic configuration values from the locally
+accessible `metadata service`_. All data is served over HTTP from the address
+169.254.169.254. The endpoints are documented in
+`https://www.vultr.com/metadata/
+<https://www.vultr.com/metadata/>`_
+
+Configuration
+-------------
+
+Vultr's datasource can be configured as follows:
+
+.. code-block:: yaml
+
+ datasource:
+ Vultr:
+ url: 'http://169.254.169.254'
+ retries: 3
+ timeout: 2
+ wait: 2
+
+- *url*: The URL used to acquire the metadata configuration from
+- *retries*: Determines the number of times to attempt to connect to the
+ metadata service
+- *timeout*: Determines the timeout in seconds to wait for a response from the
+ metadata service
+- *wait*: Determines the time in seconds to wait before retrying after a
+  failed attempt to reach the metadata service
+
+.. _Vultr: https://www.vultr.com/
+.. _metadata service: https://www.vultr.com/metadata/
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/datasources/zstack.rst b/doc/rtd/topics/datasources/zstack.rst
index 93a2791c..6630ad9f 100644
--- a/doc/rtd/topics/datasources/zstack.rst
+++ b/doc/rtd/topics/datasources/zstack.rst
@@ -34,4 +34,4 @@ Same as EC2, instance userdata can be queried at
user_data
password
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/debugging.rst b/doc/rtd/topics/debugging.rst
index 0d416f32..a4a2779f 100644
--- a/doc/rtd/topics/debugging.rst
+++ b/doc/rtd/topics/debugging.rst
@@ -1,6 +1,6 @@
-********************************
-Testing and debugging cloud-init
-********************************
+********************
+Debugging cloud-init
+********************
Overview
========
@@ -88,7 +88,7 @@ To quickly obtain a cloud-init log try using lxc on any ubuntu system:
.. code-block:: shell-session
- $ lxc init ubuntu-daily:xenial x1
+ $ lxc init ubuntu-daily:focal x1
$ lxc start x1
$ # Take lxc's cloud-init.log and pipe it to the analyzer
$ lxc file pull x1/var/log/cloud-init.log - | cloud-init analyze dump -i -
@@ -104,13 +104,13 @@ To quickly analyze a KVM a cloud-init log:
.. code-block:: shell-session
- $ wget https://cloud-images.ubuntu.com/daily/server/xenial/current/xenial-server-cloudimg-amd64.img
+ $ wget https://cloud-images.ubuntu.com/daily/server/focal/current/focal-server-cloudimg-amd64.img
2. Create a snapshot image to preserve the original cloud-image
.. code-block:: shell-session
- $ qemu-img create -b xenial-server-cloudimg-amd64.img -f qcow2 \
+ $ qemu-img create -b focal-server-cloudimg-amd64.img -f qcow2 \
test-cloudinit.qcow2
3. Create a seed image with metadata using `cloud-localds`
@@ -258,9 +258,9 @@ from **-proposed**
* Create a `new cloud-init bug`_ reporting the version of cloud-init
affected
- * Ping upstream cloud-init on Freenode's `#cloud-init IRC channel`_
+ * Ping upstream cloud-init on Libera's `#cloud-init IRC channel`_
.. _SRU: https://wiki.ubuntu.com/StableReleaseUpdates
.. _CloudinitUpdates: https://wiki.ubuntu.com/CloudinitUpdates
.. _new cloud-init bug: https://bugs.launchpad.net/cloud-init/+filebug
-.. _#cloud-init IRC channel: https://webchat.freenode.net/?channel=#cloud-init
+.. _#cloud-init IRC channel: https://kiwiirc.com/nextclient/irc.libera.chat/cloud-init
diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst
index ebd63ae7..9d2c9896 100644
--- a/doc/rtd/topics/dir_layout.rst
+++ b/doc/rtd/topics/dir_layout.rst
@@ -84,4 +84,4 @@ application::
semaphore `files` which are only supposed to run `per-once` (not tied to the
instance id).
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst
new file mode 100644
index 00000000..1a562fb4
--- /dev/null
+++ b/doc/rtd/topics/events.rst
@@ -0,0 +1,95 @@
+.. _events:
+
+******************
+Events and Updates
+******************
+
+Events
+======
+
+`Cloud-init`_ will fetch and apply cloud and user data configuration
+upon several event types. The two most common events for cloud-init
+are when an instance first boots and any subsequent boot thereafter (reboot).
+In addition to boot events, cloud-init users and vendors are interested
+in when devices are added. cloud-init currently supports the following
+event types:
+
+- **BOOT_NEW_INSTANCE**: New instance first boot
+- **BOOT**: Any system boot other than 'BOOT_NEW_INSTANCE'
+- **BOOT_LEGACY**: Similar to 'BOOT', but applies networking config twice each
+  boot: once during the Local stage, then again in the Network stage. As this
+  was previously the default behavior, this option exists to prevent regressing
+  such behavior.
+- **HOTPLUG**: Dynamic add of a system device
+
+Future work will likely include infrastructure and support for the following
+events:
+
+- **METADATA_CHANGE**: An instance's metadata has changed
+- **USER_REQUEST**: Directed request to update
+
+Datasource Event Support
+========================
+
+All :ref:`datasources` by default support the ``BOOT_NEW_INSTANCE`` event.
+Each Datasource will declare a set of these events that it is capable of
+handling. Datasources may not support all event types. In some cases a system
+may be configured to allow a particular event but may be running on
+a platform whose datasource cannot support the event.
+
+Configuring Event Updates
+=========================
+
+Update configuration may be specified via user data,
+which can be used to enable or disable handling of specific events.
+This configuration will be honored as long as the events are supported by
+the datasource. However, configuration will always be applied at first
+boot, regardless of the user data specified.
+
+Updates
+~~~~~~~
+Update policy configuration defines which
+events are allowed to be handled. This is separate from whether a
+particular platform or datasource has the capability for such events.
+
+**scope**: *<name of the scope for event policy>*
+
+The ``scope`` value is a string which defines the domain in which the
+event occurs. Currently the only known scope is ``network``, though more
+scopes may be added in the future. Scopes are defined by convention but
+arbitrary values can be used.
+
+**when**: *<list of events to handle for a particular scope>*
+
+Each ``scope`` requires a ``when`` element to specify which events
+are allowed to be handled.
+
+Hotplug
+=======
+When the hotplug event is supported by the data source and configured in
+user data, cloud-init will respond to the addition or removal of network
+interfaces to or from the system. In addition to fetching and updating the
+system metadata, cloud-init will also bring up/down the newly added interface.
+
+.. warning:: Due to its use of systemd sockets, hotplug functionality
+ is currently incompatible with SELinux. This issue is being tracked
+ `on Launchpad`_. Additionally, hotplug support is considered experimental for
+ non-Debian based systems.
+
+Examples
+========
+
+apply network config every boot
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+On every boot, apply network configuration found in the datasource.
+
+.. code-block:: yaml
+
+ # apply network config on every boot
+ updates:
+ network:
+ when: ['boot']
+
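+apply network config on boot and hotplug
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For example, to also react to hotplugged network interfaces (assuming the
+datasource supports the ``HOTPLUG`` event):
+
+.. code-block:: yaml
+
+   # apply network config on every boot and when a NIC is hot-added
+   updates:
+     network:
+       when: ['boot', 'hotplug']
+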
+.. _Cloud-init: https://launchpad.net/cloud-init
+.. _on Launchpad: https://bugs.launchpad.net/cloud-init/+bug/1936229
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst
index 81860f85..8c7071e5 100644
--- a/doc/rtd/topics/examples.rst
+++ b/doc/rtd/topics/examples.rst
@@ -149,8 +149,8 @@ Disk setup
:language: yaml
:linenos:
-Register RedHat Subscription
-============================
+Register Red Hat Subscription
+=============================
.. literalinclude:: ../../examples/cloud-config-rh_subscription.txt
:language: yaml
@@ -179,4 +179,4 @@ Grow partitions
.. _chef: http://www.chef.io/chef/
.. _puppet: http://puppetlabs.com/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 27fabf15..125ce9f4 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -10,7 +10,7 @@ Having trouble? We would like to help!
- First go through this page with answers to common questions
- Use the search bar at the upper left to search these docs
-- Ask a question in the ``#cloud-init`` IRC channel on Freenode
+- Ask a question in the ``#cloud-init`` IRC channel on Libera
- Join and ask questions on the `cloud-init mailing list <https://launchpad.net/~cloud-init>`_
- Find a bug? Check out the :ref:`reporting_bugs` topic for
how to report one
@@ -56,7 +56,7 @@ instance
--------
The `/var/lib/cloud/instance` directory is a symbolic link that points
-to the most recenlty used instance-id directory. This folder contains the
+to the most recently used instance-id directory. This folder contains the
information cloud-init received from datasources, including vendor and user
data. This can be helpful to review to ensure the correct data was passed.
@@ -74,9 +74,9 @@ previous boot:
* `instance-id`: id of the instance as discovered by cloud-init. Changing
this file has no effect.
* `result.json`: json file will show both the datasource used to setup
- the instance, and if any errors occured
+ the instance, and if any errors occurred
* `status.json`: json file shows the datasource used and a break down
- of all four modules if any errors occured and the start and stop times.
+ of all four modules if any errors occurred and the start and stop times.
What datasource am I using?
===========================
@@ -139,7 +139,7 @@ cloud-config is:
To verify your YAML, we do have a short script called `validate-yaml.py`_
that can validate your user data offline.
-.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/master/tools/validate-yaml.py
+.. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/main/tools/validate-yaml.py
Another option is to run the following on an instance to debug userdata
provided to the system:
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index d03e4caf..93ef34f0 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -23,9 +23,11 @@ Using a mime-multi part file, the user can specify more than one type of data.
For example, both a user data script and a cloud-config type could be
specified.
-Supported content-types are listed from the cloud-init subcommand make-mime::
+Supported content-types are listed from the cloud-init subcommand make-mime:
- % cloud-init devel make-mime --list-types
+.. code-block:: shell-session
+
+ $ cloud-init devel make-mime --list-types
cloud-boothook
cloud-config
cloud-config-archive
@@ -36,6 +38,9 @@ Supported content-types are listed from the cloud-init subcommand make-mime::
x-include-once-url
x-include-url
x-shellscript
+ x-shellscript-per-boot
+ x-shellscript-per-instance
+ x-shellscript-per-once
Helper subcommand to generate mime messages
@@ -45,13 +50,28 @@ The cloud-init subcommand can generate MIME multi-part files: `make-mime`_.
``make-mime`` subcommand takes pairs of (filename, "text/" mime subtype)
separated by a colon (e.g. ``config.yaml:cloud-config``) and emits a MIME
-multipart message to stdout. An example invocation, assuming you have your
-cloud config in ``config.yaml`` and a shell script in ``script.sh`` and want
-to store the multipart message in ``user-data``::
+multipart message to stdout.
+
+Examples
+--------
+Create userdata containing both a cloud-config (``config.yaml``)
+and a shell script (``script.sh``)
+
+.. code-block:: shell-session
+
+ $ cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > userdata
+
+Create userdata containing 3 shell scripts:
+
+- ``always.sh`` - Run every boot
+- ``instance.sh`` - Run once per instance
+- ``once.sh`` - Run once
+
+.. code-block:: shell-session
- % cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data
+ $ cloud-init devel make-mime -a always.sh:x-shellscript-per-boot -a instance.sh:x-shellscript-per-instance -a once.sh:x-shellscript-per-once
-.. _make-mime: https://github.com/canonical/cloud-init/blob/master/cloudinit/cmd/devel/make_mime.py
+.. _make-mime: https://github.com/canonical/cloud-init/blob/main/cloudinit/cmd/devel/make_mime.py
User-Data Script
@@ -70,7 +90,7 @@ archive.
Example
-------
-::
+.. code-block:: shell-session
$ cat myscript.sh
@@ -85,7 +105,7 @@ Include File
This content is a ``include`` file.
The file contains a list of urls, one per line. Each of the URLs will be read,
-and their content will be passed through this same set of rules. Ie, the
+and their content will be passed through this same set of rules. I.e., the
content read from the URL can be gzipped, mime-multi-part, or plain text. If
an error occurs reading a file the remaining files will not be read.
@@ -108,7 +128,7 @@ These things include:
- *and many more...*
.. note::
- This file must be valid yaml syntax.
+ This file must be valid YAML syntax.
See the :ref:`yaml_examples` section for a commented set of examples of
supported cloud config formats.
@@ -205,4 +225,4 @@ cloud-init from processing user-data.
.. [#] See your cloud provider for applicable user-data size limitations...
.. _blog: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/hacking.rst b/doc/rtd/topics/hacking.rst
deleted file mode 100644
index 5ec25bfb..00000000
--- a/doc/rtd/topics/hacking.rst
+++ /dev/null
@@ -1,2 +0,0 @@
-.. include:: ../../../HACKING.rst
-.. vi: textwidth=78
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index 1850982c..f08ead69 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -30,7 +30,7 @@ deployed with cloud-init:
* simple static object to query to obtain a instance's metadata
* speed: avoid costly network transactions for metadata that is already cached
- on the filesytem
+ on the filesystem
* reduce need to recrawl metadata services for static metadata that is already
cached
* leverage cloud-init's best practices for crawling cloud-metadata services
@@ -180,7 +180,7 @@ Example output:
v1.platform
-------------
-An attempt to identify the cloud platfrom instance that the system is running
+An attempt to identify the cloud platform instance that the system is running
on.
Examples output:
@@ -509,14 +509,19 @@ EC2 instance:
Using instance-data
===================
-As of cloud-init v. 18.4, any variables present in
-``/run/cloud-init/instance-data.json`` can be used in:
+As of cloud-init v. 18.4, any instance-data can be used in:
* User-data scripts
* Cloud config data
* Command line interface via **cloud-init query** or
**cloud-init devel render**
+This means that any variable present in
+``/run/cloud-init/instance-data-sensitive.json`` can be used,
+unless a non-root user is using the command line interface.
+In the non-root user case,
+``/run/cloud-init/instance-data.json`` will be used instead.
+
Many clouds allow users to provide user-data to an instance at
the time the instance is launched. Cloud-init supports a number of
:ref:`user_data_formats`.
@@ -525,12 +530,18 @@ Both user-data scripts and **#cloud-config** data support jinja template
rendering.
When the first line of the provided user-data begins with,
**## template: jinja** cloud-init will use jinja to render that file.
-Any instance-data-sensitive.json variables are surfaced as dot-delimited
-jinja template variables because cloud-config modules are run as 'root'
-user.
+Any instance-data-sensitive.json variables are surfaced as jinja template
+variables because cloud-config modules are run as 'root' user.
+.. note::
+ cloud-init also provides jinja-safe key aliases for any instance-data.json
+ keys which contain jinja operator characters such as +, -, ., /, etc. Any
+ jinja operator will be replaced with underscores in the jinja-safe key
+   alias. This allows cloud-init templates to use aliased variable
+   references with jinja's dot-notation, such as
+   ``{{ ds.v1_0.my_safe_key }}`` instead of ``{{ ds["v1.0"]["my/safe-key"] }}``.
-Below are some examples of providing these types of user-data:
+Below are some other examples of using jinja templates in user-data:
* Cloud config calling home with the ec2 public hostname and availability-zone
@@ -559,9 +570,39 @@ Below are some examples of providing these types of user-data:
{%- endif %}
...
+One way to easily explore what Jinja variables are available on your machine
+is to use the ``cloud-init query --format`` (-f) commandline option which will
+render any Jinja syntax you use. Warnings or exceptions will be raised on
+invalid instance-data keys, paths or syntax.
+
+.. code-block:: shell-session
+
+ # List all instance-data keys and values as root user
+ % sudo cloud-init query --all
+ {...}
+
+ # Introspect nested keys on an object
+ % cloud-init query -f "{{ds.keys()}}"
+ dict_keys(['meta_data', '_doc'])
+
+ # Test your Jinja rendering syntax on the command-line directly
+
+ # Failure to reference valid top-level instance-data key
+ % cloud-init query -f "{{invalid.instance-data.key}}"
+ WARNING: Ignoring jinja template for query commandline: 'invalid' is undefined
+
+ # Failure to reference valid dot-delimited key path on a known top-level key
+ % cloud-init query -f "{{v1.not_here}}"
+ WARNING: Could not render jinja template variables in file 'query commandline': 'not_here'
+ CI_MISSING_JINJA_VAR/not_here
+
+ # Test expected value using valid instance-data key path
+ % cloud-init query -f "My AMI: {{ds.meta_data.ami_id}}"
+ My AMI: ami-0fecc35d3c8ba8d60
+
.. note::
Trying to reference jinja variables that don't exist in
- instance-data.json will result in warnings in ``/var/log/cloud-init.log``
+ instance-data will result in warnings in ``/var/log/cloud-init.log``
and the following string in your rendered user-data:
``CI_MISSING_JINJA_VAR/<your_varname>``.
@@ -614,4 +655,4 @@ see only redacted values.
instance booted on your favorite cloud. See :ref:`cli_devel` for more
information.
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/integration_tests.rst b/doc/rtd/topics/integration_tests.rst
index aeda326c..f9f719da 100644
--- a/doc/rtd/topics/integration_tests.rst
+++ b/doc/rtd/topics/integration_tests.rst
@@ -9,11 +9,96 @@ Overview
Integration tests are written using pytest and are located at
``tests/integration_tests``. General design principles
-laid out in :ref:`unit_testing` should be followed for integration tests.
+laid out in :ref:`testing` should be followed for integration tests.
Setup is accomplished via a set of fixtures located in
``tests/integration_tests/conftest.py``.
+Test Definition
+===============
+Tests are defined like any other pytest test. The ``user_data``
+mark can be used to supply the cloud-config user data. Platform specific
+marks can be used to limit tests to particular platforms. The
+client fixture can be used to interact with the launched
+test instance.
+
+See the :ref:`Examples` section for examples.
+
+Test Execution
+==============
+Test execution happens via pytest. A tox definition exists to run integration
+tests. To run all integration tests, you would run:
+
+.. code-block:: bash
+
+ $ tox -e integration-tests
+
+Pytest arguments may also be passed. For example:
+
+.. code-block:: bash
+
+ $ tox -e integration-tests tests/integration_tests/modules/test_combined.py
+
+Configuration
+=============
+
+All possible configuration values are defined in
+`tests/integration_tests/integration_settings.py <https://github.com/canonical/cloud-init/blob/main/tests/integration_tests/integration_settings.py>`_.
+Defaults can be
+overridden by supplying values in ``tests/integration_tests/user_settings.py``
+or by providing an environment variable of the same name prepended with
+``CLOUD_INIT_``. For example, to set the ``PLATFORM`` setting:
+
+.. code-block:: bash
+
+ CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/
+
+
+Cloud Interaction
+=================
+Cloud interaction happens via the
+`pycloudlib <https://pycloudlib.readthedocs.io/en/latest/index.html>`_ library.
+In order to run integration tests, pycloudlib must first be
+`configured <https://pycloudlib.readthedocs.io/en/latest/configuration.html#configuration>`_.
+
+For a minimal setup using LXD, write the following to
+``~/.config/pycloudlib.toml``:
+
+.. code-block:: toml
+
+ [lxd]
+
+
+Image Selection
+===============
+
+Each integration testing run uses a single image as its basis. This
+image is configured using the ``OS_IMAGE`` variable; see
+:ref:`Configuration` for details of how configuration works.
+
+``OS_IMAGE`` can take two types of value: an Ubuntu series name (e.g.
+"focal"), or an image specification. If an Ubuntu series name is
+given, then the most recent image for that series on the target cloud
+will be used. For other use cases, an image specification is used.
+
+In its simplest form, an image specification can simply be a cloud's
+image ID (e.g. "ami-deadbeef", "ubuntu:focal"). In this case, the
+image so-identified will be used as the basis for this testing run.
+
+This has a drawback, however: as we do not know what OS or release is
+within the image, the integration testing framework will run *all*
+tests against the image in question. If it's a RHEL8 image, then we
+would expect Ubuntu-specific tests to fail (and vice versa).
+
+To address this, a full image specification can be given. This is of
+the form: ``<image_id>[::<os>[::<release>]]`` where ``image_id`` is a
+cloud's image ID, ``os`` is the OS name, and ``release`` is the OS
+release name. So, for example, Ubuntu 18.04 (Bionic Beaver) on LXD is
+``ubuntu:bionic::ubuntu::bionic`` or RHEL 8 on Amazon is
+``ami-justanexample::rhel::8``. When a full specification is given,
+only tests which are intended for use on that OS and release will be
+executed.
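+
+For example, to run the RHEL 8 image from the specification above on EC2 (the
+image ID is illustrative):
+
+.. code-block:: bash
+
+    CLOUD_INIT_PLATFORM='ec2' CLOUD_INIT_OS_IMAGE='ami-justanexample::rhel::8' pytest tests/integration_tests/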
+
Image Setup
===========
@@ -34,48 +119,90 @@ is implemented via one of the ``client`` fixtures. When a client fixture
is used, a test instance from which to run tests is launched prior to
test execution and torn down after.
-Test Definition
-===============
-Tests are defined like any other pytest test. The ``user_data``
-mark can be used to supply the cloud-config user data. Platform specific
-marks can be used to limit tests to particular platforms. The
-client fixture can be used to interact with the launched
-test instance.
-
-A basic example:
+Continuous Integration
+======================
+A subset of the integration tests are run when a pull request
+is submitted on Github. The tests run on these continuous
+integration (CI) runs are given a pytest mark:
.. code-block:: python
- USER_DATA = """#cloud-config
- bootcmd:
- - echo 'hello config!' > /tmp/user_data.txt"""
-
+ @pytest.mark.ci
+
+Most new tests should *not* use this mark, so be aware that having a
+successful CI run does not necessarily mean that your test passed
+successfully.
+
+Fixtures
+========
+Integration tests rely heavily on fixtures to do initial test setup.
+One or more of these fixtures will be used in almost every integration test.
+
+Details such as the cloud platform or initial image to use are determined
+via what is specified in the :ref:`Configuration`.
+
+client
+------
+The ``client`` fixture should be used for most test cases. It ensures:
+
+- All setup performed by :ref:`session_cloud` and :ref:`setup_image`
+- `Pytest marks <https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/conftest.py#L220-L224>`_
+ used during instance creation are obtained and applied
+- The test instance is launched
+- Test failure status is determined after test execution
+- Logs are collected (if configured) after test execution
+- The test instance is torn down after test execution
+
+``module_client`` and ``class_client`` fixtures also exist for the
+purpose of running multiple tests against a single launched instance.
+They provide the exact same functionality as ``client``, but are
+scoped to the module or class respectively.
+
+session_cloud
+-------------
+The ``session_cloud`` session-scoped fixture will provide an
+`IntegrationCloud <https://github.com/canonical/cloud-init/blob/af7eb1deab12c7208853c5d18b55228e0ba29c4d/tests/integration_tests/clouds.py#L102>`_
+instance for the currently configured cloud. The fixture also
+ensures that any custom cloud session cleanup is performed.
+
+setup_image
+-----------
+The ``setup_image`` session-scope fixture will
+create a new image to launch all further cloud instances
+during this test run. It ensures:
+
+- A cloud instance is launched on the configured platform
+- The version of cloud-init under test is installed on the instance
+- ``cloud-init clean --logs`` is run on the instance
+- A snapshot of the instance is taken to be used as the basis for
+ future instance launches
+- The originally launched instance is torn down
+- The custom created image is torn down after all tests finish
+
+Examples
+--------
+A simple test case using the ``client`` fixture:
- class TestSimple:
- @pytest.mark.user_data(USER_DATA)
- @pytest.mark.ec2
- def test_simple(self, client):
- print(client.exec('cloud-init -v'))
-
-Test Execution
-==============
-Test execution happens via pytest. To run all integration tests,
-you would run:
-
-.. code-block:: bash
+.. code-block:: python
- pytest tests/integration_tests/
+ USER_DATA = """\
+ #cloud-config
+ bootcmd:
+ - echo 'hello!' > /var/tmp/hello.txt
+ """
-Configuration
-=============
+ @pytest.mark.user_data(USER_DATA)
+ def test_bootcmd(client):
+ log = client.read_from_file("/var/log/cloud-init.log")
+ assert "Shellified 1 commands." in log
+ assert client.execute('cat /var/tmp/hello.txt').strip() == "hello!"
-All possible configuration values are defined in
-``tests/integration_tests/integration_settings.py``. Defaults can be
-overridden by supplying values in ``tests/integration_tests/user_settings.py``
-or by providing an environment variable of the same name prepended with
-``CLOUD_INIT_``. For example, to set the ``PLATFORM`` setting:
+Customizing the launch arguments before launching an instance manually:
-.. code-block:: bash
+.. code-block:: python
- CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/
+ def test_launch(session_cloud: IntegrationCloud, setup_image):
+ with session_cloud.launch(launch_kwargs={"wait": False}) as client:
+ client.instance.wait()
+ assert client.execute("echo hello world").strip() == "hello world"
diff --git a/doc/rtd/topics/logging.rst b/doc/rtd/topics/logging.rst
index 4fd7e28e..a14fb685 100644
--- a/doc/rtd/topics/logging.rst
+++ b/doc/rtd/topics/logging.rst
@@ -52,9 +52,9 @@ module using the standard python fileConfig format. Cloud-init looks for
config for the logging module under the ``logcfg`` key.
.. note::
- the logging configuration is not yaml, it is python ``fileConfig`` format,
+ the logging configuration is not YAML, it is python ``fileConfig`` format,
and is passed through directly to the python logging module. please use the
- correct syntax for a multi-line string in yaml.
+ correct syntax for a multi-line string in YAML.
By default, cloud-init uses the logging configuration provided in
``/etc/cloud/cloud.cfg.d/05_logging.cfg``. The default python logging
@@ -173,4 +173,4 @@ For more information on rsyslog configuration, see :ref:`cc_rsyslog`.
.. _python logging config: https://docs.python.org/3/library/logging.config.html#configuration-file-format
.. _python logging handlers: https://docs.python.org/3/library/logging.handlers.html
.. _python logging formatters: https://docs.python.org/3/library/logging.html#formatter-objects
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
index 2b5e5dad..204719eb 100644
--- a/doc/rtd/topics/merging.rst
+++ b/doc/rtd/topics/merging.rst
@@ -6,8 +6,8 @@ Overview
========
This was implemented because it has been a common feature request that there be
-a way to specify how cloud-config yaml "dictionaries" provided as user-data are
-merged together when there are multiple yaml files to merge together (say when
+a way to specify how cloud-config YAML "dictionaries" provided as user-data are
+merged together when there are multiple YAML files to merge together (say when
performing an #include).
Since previously the merging algorithm was very simple and would only overwrite
@@ -236,7 +236,7 @@ Other uses
==========
In addition to being used for merging user-data sections, the default merging
-algorithm for merging 'conf.d' yaml files (which form an initial yaml config
+algorithm for merging 'conf.d' YAML files (which form an initial YAML config
for cloud-init) was also changed to use this mechanism so its full
benefits (and customization) can also be used there as well. Other places that
used the previous merging are also, similarly, now extensible (metadata
@@ -285,4 +285,4 @@ The second config
- bash4
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index e30fe0fe..093cee61 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -22,6 +22,8 @@ Modules
.. automodule:: cloudinit.config.cc_foo
.. automodule:: cloudinit.config.cc_growpart
.. automodule:: cloudinit.config.cc_grub_dpkg
+.. automodule:: cloudinit.config.cc_install_hotplug
+.. automodule:: cloudinit.config.cc_keyboard
.. automodule:: cloudinit.config.cc_keys_to_console
.. automodule:: cloudinit.config.cc_landscape
.. automodule:: cloudinit.config.cc_locale
@@ -62,4 +64,4 @@ Modules
.. automodule:: cloudinit.config.cc_users_groups
.. automodule:: cloudinit.config.cc_write_files
.. automodule:: cloudinit.config.cc_yum_add_repo
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config-format-eni.rst b/doc/rtd/topics/network-config-format-eni.rst
index b0904952..94fa0f9e 100644
--- a/doc/rtd/topics/network-config-format-eni.rst
+++ b/doc/rtd/topics/network-config-format-eni.rst
@@ -17,4 +17,4 @@ Please reference existing `documentation`_ for the
.. _Cloud-init: https://launchpad.net/cloud-init
.. _documentation: http://manpages.ubuntu.com/manpages/trusty/en/man5/interfaces.5.html
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index 92e81897..68a9cefa 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -48,7 +48,7 @@ the key ``subnets``.
Physical
~~~~~~~~
The ``physical`` type configuration represents a "physical" network device,
-typically Ethernet-based. At least one of of these entries is required for
+typically Ethernet-based. At least one of these entries is required for
external network connectivity. Type ``physical`` requires only one key:
``name``. A ``physical`` device may contain some or all of the following
keys:
@@ -62,7 +62,8 @@ structure.
**mac_address**: *<MAC Address>*
The MAC Address is a device unique identifier that most Ethernet-based network
-devices possess. Specifying a MAC Address is optional.
+devices possess. Specifying a MAC Address is optional.
+Letters must be lowercase.
.. note::
@@ -334,7 +335,11 @@ Users can specify a ``nameserver`` type. Nameserver dictionaries include
the following keys:
- ``address``: List of IPv4 or IPv6 address of nameservers.
-- ``search``: List of of hostnames to include in the resolv.conf search path.
+- ``search``: List of hostnames to include in the resolv.conf search path.
+- ``interface``: Optional. Ties the nameserver definition to the specified
+ interface. The value specified here must match the `name` of an interface
+ defined in this config. If unspecified, this nameserver will be considered
+ a global nameserver.
**Nameserver Example**::
@@ -349,6 +354,7 @@ the following keys:
address: 192.168.23.14/27
gateway: 192.168.23.1
- type: nameserver
+ interface: interface0 # Ties nameserver to interface0 only
address:
- 192.168.23.2
- 8.8.8.8
@@ -414,9 +420,19 @@ Subnet types are one of the following:
- ``dhcp6``: Configure this interface with IPv6 dhcp.
- ``static``: Configure this interface with a static IPv4.
- ``static6``: Configure this interface with a static IPv6 .
+- ``ipv6_dhcpv6-stateful``: Configure this interface with ``dhcp6``
+- ``ipv6_dhcpv6-stateless``: Configure this interface with SLAAC and DHCP
+- ``ipv6_slaac``: Configure address with SLAAC
-When making use of ``dhcp`` types, no additional configuration is needed in
-the subnet dictionary.
+When making use of ``dhcp`` or either of the ``ipv6_dhcpv6`` types,
+no additional configuration is needed in the subnet dictionary.
+
+Using ``ipv6_dhcpv6-stateless`` or ``ipv6_slaac`` allows the IPv6 address to
+be configured automatically with Stateless Address Autoconfiguration
+(`SLAAC`_). SLAAC requires support from the network, so verify that your
+cloud or network offering supports it before trying it out. With
+``ipv6_dhcpv6-stateless``, DHCPv6 is still used to fetch other subnet details
+such as the gateway or DNS servers. If you only need the address, use
+``ipv6_slaac``.
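+
+**Subnet SLAAC Example** (an illustrative sketch; the interface name
+``interface0`` is assumed)::
+
+    network:
+      version: 1
+      config:
+        - type: physical
+          name: interface0
+          subnets:
+            - type: ipv6_slaac
+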
**Subnet DHCP Example**::
@@ -603,4 +619,6 @@ Some more examples to explore the various options available.
- dellstack
type: nameserver
-.. vi: textwidth=78
+.. _SLAAC: https://tools.ietf.org/html/rfc4862
+
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index aa17bef5..c1bf05d1 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -8,9 +8,25 @@ version 2 format defined for the `netplan`_ tool. Cloud-init supports
both reading and writing of Version 2; the latter support requires a
distro with `netplan`_ present.
+Netplan Passthrough
+-------------------
+
+On a system with netplan present, cloud-init will pass Version 2 configuration
+through to netplan without modification. On such systems, you do not need to
+limit yourself to the subset of netplan's configuration format described
+below.
+
+.. warning::
+   If you are writing or generating network configuration that may be used on
+   non-netplan systems, you **must** limit yourself to the subset described in
+   this document, or network configuration will fail on those systems.
+
+Version 2 Configuration Format
+------------------------------
+
The ``network`` key has at least two required elements. First
it must include ``version: 2`` and one or more of possible device
-``types``..
+``types``.
Cloud-init will read this format from system config.
For example the following could be present in
@@ -34,9 +50,6 @@ Each type block contains device definitions as a map where the keys (called
"configuration IDs"). Each entry under the ``types`` may include IP and/or
device configuration.
-Cloud-init does not current support ``wifis`` type that is present in native
-`netplan`_.
-
Device configuration IDs
------------------------
@@ -94,7 +107,8 @@ NetworkManager does not.
**macaddress**: *<(scalar)>*
-Device's MAC address in the form XX:XX:XX:XX:XX:XX. Globs are not allowed.
+Device's MAC address in the form xx:xx:xx:xx:xx:xx. Globs are not allowed.
+Letters must be lowercase.
.. note::
@@ -118,7 +132,7 @@ supported. Matching on driver is *only* supported with networkd.
# fixed MAC address
match:
- macaddress: 11:22:33:AA:BB:FF
+ macaddress: 11:22:33:aa:bb:ff
# first card of driver ``ixgbe``
match:
@@ -478,6 +492,11 @@ This is a complex example which shows most available features: ::
nameservers:
search: [foo.local, bar.local]
addresses: [8.8.8.8]
+ # static routes
+ routes:
+ - to: 192.0.2.0/24
+ via: 11.0.0.1
+ metric: 3
lom:
match:
driver: ixgbe
@@ -506,11 +525,6 @@ This is a complex example which shows most available features: ::
id: 1
link: id0
dhcp4: yes
- # static routes
- routes:
- - to: 0.0.0.0/0
- via: 11.0.0.1
- metric: 3
-
-.. _netplan: https://launchpad.net/netplan
-.. vi: textwidth=78
+
+.. _netplan: https://netplan.io
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 08db04d8..c461a3fe 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -75,6 +75,17 @@ If `Cloud-init`_ 's networking config has not been disabled, and
no other network information is found, then it will proceed
to generate a fallback networking configuration.
+Disabling Network Activation
+----------------------------
+
+Some datasources may not be initialized until after the network has been
+brought up. In this case, cloud-init will attempt to bring up the interfaces
+specified by the datasource metadata.
+
+This behavior can be disabled in the cloud-init configuration dictionary,
+merged from ``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
+
+ disable_network_activation: true
Fallback Network Configuration
==============================
@@ -104,6 +115,13 @@ interface given the information it has available.
Finally after selecting the "right" interface, a configuration is
generated and applied to the system.
+.. note::
+
+   PhotonOS disables fallback networking configuration by default, leaving
+   the network unrendered when no other network config is provided.
+   If fallback config is still desired on PhotonOS, it can be enabled by
+   setting ``disable_fallback_netcfg: false`` in the system configuration
+   (``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``).
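+
+   For example, a minimal sketch of the relevant line in
+   ``/etc/cloud/cloud.cfg``::
+
+       disable_fallback_netcfg: false
+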
Network Configuration Sources
=============================
@@ -144,6 +162,14 @@ The following Datasources optionally provide network configuration:
- `SmartOS JSON Metadata`_
+- :ref:`datasource_upcloud`
+
+ - `UpCloud JSON metadata`_
+
+- :ref:`datasource_vultr`
+
+ - `Vultr JSON metadata`_
+
For more information on network configuration formats
.. toctree::
@@ -257,5 +283,7 @@ Example output converting V2 to sysconfig:
.. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/#network-interfaces-index
.. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html
.. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
+.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
+.. _Vultr JSON metadata: https://www.vultr.com/metadata/
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/security.rst b/doc/rtd/topics/security.rst
index b8386843..48fcb0a5 100644
--- a/doc/rtd/topics/security.rst
+++ b/doc/rtd/topics/security.rst
@@ -2,4 +2,4 @@
.. mdinclude:: ../../../SECURITY.md
-.. vi: textwidth=78
+.. vi: textwidth=79
diff --git a/doc/rtd/topics/testing.rst b/doc/rtd/topics/testing.rst
new file mode 100644
index 00000000..5543c6f5
--- /dev/null
+++ b/doc/rtd/topics/testing.rst
@@ -0,0 +1,160 @@
+*******
+Testing
+*******
+
+cloud-init has both unit tests and integration tests. Unit tests can
+be found at ``tests/unittests``. Integration tests can be found at
+``tests/integration_tests``. Documentation specifically for integration
+tests can be found on the :ref:`integration_tests` page, but
+the guidelines specified below apply to both types of tests.
+
+cloud-init uses `pytest`_ to run its tests, and has tests written both
+as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests.
+
+Guidelines
+==========
+
+The following guidelines should be followed.
+
+Test Layout
+-----------
+
+* For ease of organisation and greater accessibility for developers not
+ familiar with pytest, all cloud-init unit tests must be contained
+ within test classes
+
+ * Put another way, module-level test functions should not be used
+
+* As all tests are contained within classes, it is acceptable to mix
+ ``TestCase`` test classes and pytest test classes within the same
+ test file
+
+ * These can be easily distinguished by their definition: pytest
+ classes will not use inheritance at all (e.g.
+ `TestGetPackageMirrorInfo`_), whereas ``TestCase`` classes will
+ subclass (indirectly) from ``TestCase`` (e.g.
+ `TestPrependBaseCommands`_)
+
+* Unit tests and integration tests are located under ``cloud-init/tests``
+
+  * For consistency, unit test files should have a matching name and
+    directory location under ``tests/unittests``
+
+    * For example: the expected test file for code in
+      ``cloudinit/path/to/file.py`` is
+      ``tests/unittests/path/to/test_file.py`` (see the sketch after this
+      list)
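+
+A minimal sketch of this layout (the paths and test bodies are purely
+illustrative; it also shows that both class styles may coexist in one file)::
+
+    # tests/unittests/path/to/test_file.py
+    from unittest import TestCase
+
+
+    class TestPytestStyle:
+        """pytest-style class: no inheritance, bare asserts."""
+
+        def test_lowercase(self):
+            assert "file" == "FILE".lower()
+
+
+    class TestUnittestStyle(TestCase):
+        """TestCase-style class: may live in the same file."""
+
+        def test_lowercase(self):
+            self.assertEqual("file", "FILE".lower())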
+
+
+``pytest`` Tests
+----------------
+
+* pytest test classes should use `pytest fixtures`_ to share
+ functionality instead of inheritance
+
+* pytest tests should use bare ``assert`` statements, to take advantage
+  of pytest's `assertion introspection`_ (see the sketch after this list)
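+
+A minimal sketch of both points (the ``tmp_text_file`` fixture and its
+contents are purely illustrative)::
+
+    import pytest
+
+
+    @pytest.fixture
+    def tmp_text_file(tmpdir):
+        """Share setup via a fixture instead of a base class."""
+        path = tmpdir.join("example.txt")
+        path.write("hello\n")
+        return path
+
+
+    class TestTmpTextFile:
+        def test_contents(self, tmp_text_file):
+            # Bare assert: pytest reports both compared values on failure.
+            assert "hello\n" == tmp_text_file.read()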
+
+``pytest`` Version Gotchas
+--------------------------
+
+As we still support Ubuntu 18.04 (Bionic Beaver), we can only use pytest
+features that are available in v3.3.2. This is a non-exhaustive list of
+ways in which this may catch you out:
+
+* Only the following built-in fixtures are available [#fixture-list]_:
+
+ * ``cache``
+ * ``capfd``
+ * ``capfdbinary``
+ * ``caplog``
+ * ``capsys``
+ * ``capsysbinary``
+ * ``doctest_namespace``
+ * ``monkeypatch``
+ * ``pytestconfig``
+ * ``record_xml_property``
+ * ``recwarn``
+ * ``tmpdir_factory``
+ * ``tmpdir``
+
+Mocking and Assertions
+----------------------
+
+* Variables/parameter names for ``Mock`` or ``MagicMock`` instances
+ should start with ``m_`` to clearly distinguish them from non-mock
+ variables
+
+  * For example, ``m_readurl`` (which would be a mock for ``readurl``); a
+    combined sketch appears at the end of this section
+
+* The ``assert_*`` methods that are available on ``Mock`` and
+ ``MagicMock`` objects should be avoided, as typos in these method
+ names may not raise ``AttributeError`` (and so can cause tests to
+ silently pass). An important exception: if a ``Mock`` is
+ `autospecced`_ then misspelled assertion methods *will* raise an
+ ``AttributeError``, so these assertion methods may be used on
+ autospecced ``Mock`` objects.
+
+ For non-autospecced ``Mock`` s, these substitutions can be used
+ (``m`` is assumed to be a ``Mock``):
+
+ * ``m.assert_any_call(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) in m.call_args_list``
+ * ``m.assert_called()`` => ``assert 0 != m.call_count``
+ * ``m.assert_called_once()`` => ``assert 1 == m.call_count``
+ * ``m.assert_called_once_with(*args, **kwargs)`` => ``assert
+ [mock.call(*args, **kwargs)] == m.call_args_list``
+ * ``m.assert_called_with(*args, **kwargs)`` => ``assert
+ mock.call(*args, **kwargs) == m.call_args_list[-1]``
+ * ``m.assert_has_calls(call_list, any_order=True)`` => ``for call in
+ call_list: assert call in m.call_args_list``
+
+ * ``m.assert_has_calls(...)`` and ``m.assert_has_calls(...,
+ any_order=False)`` are not easily replicated in a single
+ statement, so their use when appropriate is acceptable.
+
+ * ``m.assert_not_called()`` => ``assert 0 == m.call_count``
+
+* When there are multiple patch calls in a test file for the module it
+ is testing, it may be desirable to capture the shared string prefix
+ for these patch calls in a module-level variable. If used, such
+ variables should be named ``M_PATH`` or, for datasource tests,
+ ``DS_PATH``.
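+
+A combined sketch of these points (patching ``os.getcwd`` purely for
+illustration; real tests would typically patch cloud-init code under a shared
+``M_PATH`` prefix)::
+
+    import os
+    from unittest import mock
+
+
+    class TestCwd:
+        @mock.patch("os.getcwd", autospec=True)
+        def test_reports_cwd(self, m_getcwd):
+            m_getcwd.return_value = "/tmp"
+            assert "/tmp" == os.getcwd()
+            # Substitutions for m_getcwd.assert_called_once() and
+            # m_getcwd.assert_called_with():
+            assert 1 == m_getcwd.call_count
+            assert mock.call() == m_getcwd.call_args_list[-1]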
+
+Test Argument Ordering
+----------------------
+
+* Test arguments should be ordered as follows (a sketch follows this list):
+
+ * ``mock.patch`` arguments. When used as a decorator, ``mock.patch``
+ partially applies its generated ``Mock`` object as the first
+ argument, so these arguments must go first.
+ * ``pytest.mark.parametrize`` arguments, in the order specified to
+ the ``parametrize`` decorator. These arguments are also provided
+ by a decorator, so it's natural that they sit next to the
+ ``mock.patch`` arguments.
+ * Fixture arguments, alphabetically. These are not provided by a
+ decorator, so they are last, and their order has no defined
+ meaning, so we default to alphabetical.
+
+* It follows from this ordering of test arguments (so that we retain
+ the property that arguments left-to-right correspond to decorators
+ bottom-to-top) that test decorators should be ordered as follows:
+
+ * ``pytest.mark.parametrize``
+ * ``mock.patch``
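+
+A sketch of this ordering (the patched ``os.getcwd`` target and the
+parametrized values are purely illustrative)::
+
+    import os
+    from unittest import mock
+
+    import pytest
+
+
+    class TestArgumentOrdering:
+        @pytest.mark.parametrize("value,expected", [("a", "A"), ("b", "B")])
+        @mock.patch("os.getcwd", autospec=True)
+        def test_ordering(self, m_getcwd, value, expected, tmpdir):
+            # mock args first, then parametrize args, then fixtures.
+            m_getcwd.return_value = str(tmpdir)
+            assert expected == value.upper()
+            assert str(tmpdir) == os.getcwd()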
+
+.. [#fixture-list] This list of fixtures (with markup) can be
+ reproduced by running::
+
+ python3 -m pytest --fixtures -q | grep "^[^ -]" | grep -v 'no tests ran in' | sort | sed 's/ \[session scope\]//g;s/.*/* ``\0``/g'
+
+   in an Ubuntu LXD container with python3-pytest installed.
+
+.. _pytest: https://docs.pytest.org/
+.. _pytest fixtures: https://docs.pytest.org/en/latest/fixture.html
+.. _TestGetPackageMirrorInfo: https://github.com/canonical/cloud-init/blob/42f69f410ab8850c02b1f53dd67c132aa8ef64f5/cloudinit/distros/tests/test_init.py#L15
+.. _TestPrependBaseCommands: https://github.com/canonical/cloud-init/blob/fbcb224bc12495ba200ab107246349d802c5d8e6/cloudinit/tests/test_subp.py#L20
+.. _assertion introspection: https://docs.pytest.org/en/latest/assert.html
+.. _pytest 3.0: https://docs.pytest.org/en/latest/changelog.html#id1093
+.. _pytest.param: https://docs.pytest.org/en/6.2.x/reference.html#pytest-param
+.. _autospecced: https://docs.python.org/3.8/library/unittest.mock.html#autospeccing
diff --git a/doc/rtd/topics/vendordata.rst b/doc/rtd/topics/vendordata.rst
index cdb552d0..e659c941 100644
--- a/doc/rtd/topics/vendordata.rst
+++ b/doc/rtd/topics/vendordata.rst
@@ -47,8 +47,8 @@ way as user-data.
The only differences are:
- * user-scripts are stored in a different location than user-scripts (to
- avoid namespace collision)
+ * vendor-data-defined scripts are stored in a different location than
+ user-data-defined scripts (to avoid namespace collision)
* user can disable part handlers by cloud-config settings.
For example, to disable handling of 'part-handlers' in vendor-data,
the user could provide user-data like this:
@@ -68,4 +68,4 @@ of input files. That data can then be given to an instance.
See 'write-mime-multipart --help' for usage.
-.. vi: textwidth=78
+.. vi: textwidth=79