path: root/doc
author     Vlastimil Holer <vlastimil.holer@gmail.com>  2013-09-05 13:11:09 +0200
committer  Vlastimil Holer <vlastimil.holer@gmail.com>  2013-09-05 13:11:09 +0200
commit     744c779182cba32314f8435660a61c2711cb9f54 (patch)
tree       7871342bf0b122217b51493286bac982313b48da /doc
parent     8a2a88e0bb4520eabe99b6686413a548f3d59652 (diff)
parent     1d27cd75eaaeef7b72f3be77de24da815c82a825 (diff)
download   vyos-cloud-init-744c779182cba32314f8435660a61c2711cb9f54.tar.gz
           vyos-cloud-init-744c779182cba32314f8435660a61c2711cb9f54.zip
Merged trunk lp:cloud-init
Diffstat (limited to 'doc')
-rw-r--r--              doc/examples/cloud-config-TODO.txt          |  20
-rw-r--r--              doc/examples/cloud-config-datasources.txt   |  23
-rw-r--r--              doc/examples/cloud-config-growpart.txt      |  24
-rw-r--r--              doc/examples/cloud-config.txt               |  21
-rw-r--r--              doc/merging.rst                             | 188
-rw-r--r--              doc/rtd/conf.py                             |   8
-rw-r--r--              doc/rtd/index.rst                           |   1
-rw-r--r-- [-rwxr-xr-x] doc/rtd/static/logo.svg                     |   0
-rw-r--r--              doc/rtd/topics/merging.rst                  |   5
-rw-r--r--              doc/sources/azure/README.rst                | 134
-rw-r--r--              doc/sources/smartos/README.rst              |  72
11 files changed, 472 insertions, 24 deletions
diff --git a/doc/examples/cloud-config-TODO.txt b/doc/examples/cloud-config-TODO.txt
deleted file mode 100644
index c7ed54ab..00000000
--- a/doc/examples/cloud-config-TODO.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-# Add apt configuration files
-# Add an apt.conf.d/ file with the relevant content
-#
-# See apt.conf man page for more information.
-#
-# Defaults:
-# + filename: 00-boot-conf
-#
-apt_conf:
-
- # Creates an apt proxy configuration in /etc/apt/apt.conf.d/01-proxy
- - filename: "01-proxy"
- content: |
- Acquire::http::Proxy "http://proxy.example.org:3142/ubuntu";
-
- # Add the following line to /etc/apt/apt.conf.d/00-boot-conf
- # (run debconf at a critical priority)
- - content: |
- DPkg::Pre-Install-Pkgs:: "/usr/sbin/dpkg-preconfigure --apt -p critical|| true";
-
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index fc8c22d4..65a3cdf5 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -42,3 +42,26 @@ datasource:
meta-data:
instance-id: i-87018aed
local-hostname: myhost.internal
+
+ Azure:
+ agent_command: [service, walinuxagent, start]
+ set_hostname: True
+ hostname_bounce:
+ interface: eth0
+ policy: on # [can be 'on', 'off' or 'force']
+
+ SmartOS:
+   # The SmartOS datasource works over a serial console, interacting with
+   # a server on the other end. By default, the second serial console is the
+   # device used. SmartOS also uses a serial timeout of 60 seconds.
+ serial_device: /dev/ttyS1
+ serial_timeout: 60
+
+   # a list of keys that will not be base64 decoded even if base64_all is true
+ no_base64_decode: ['root_authorized_keys', 'motd_sys_info',
+ 'iptables_disable']
+ # a plaintext, comma delimited list of keys whose values are b64 encoded
+ base64_keys: []
+ # a boolean indicating that all keys not in 'no_base64_decode' are encoded
+ base64_all: False
diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt
new file mode 100644
index 00000000..705f02c2
--- /dev/null
+++ b/doc/examples/cloud-config-growpart.txt
@@ -0,0 +1,24 @@
+#cloud-config
+#
+# The growpart entry is a dict. If it is not present at all
+# in config, then the default is used ({'mode': 'auto', 'devices': ['/']})
+#
+# mode:
+# values:
+# * auto: use any option possible (growpart or parted)
+#      if neither is available, do not warn, but log at debug level.
+# * growpart: use growpart to grow partitions
+# if growpart is not available, this is an error.
+# * parted: use parted (parted resizepart) to resize partitions
+# if parted is not available, this is an error.
+#   * off, false: take no action (do not resize partitions)
+#
+# devices:
+# a list of things to resize.
+# items can be filesystem paths or devices (in /dev)
+# examples:
+# devices: [/, /dev/vdb1]
+#
+growpart:
+ mode: auto
+ devices: ['/']
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 09298655..bcfd7917 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -53,6 +53,9 @@ apt_mirror_search:
apt_mirror_search_dns: False
# apt_proxy (configure Acquire::HTTP::Proxy)
+# 'apt_http_proxy' is an alias for 'apt_proxy'.
+# Also available are 'apt_ftp_proxy' and 'apt_https_proxy'.
+# These affect Acquire::FTP::Proxy and Acquire::HTTPS::Proxy respectively.
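+# As an illustrative sketch (the proxy URLs below are made up), they could be
+# set alongside apt_proxy like:
+#   apt_ftp_proxy: ftp://my.apt.proxy:2121
+#   apt_https_proxy: https://my.apt.proxy:3129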
apt_proxy: http://my.apt.proxy:3128
# apt_pipelining (configure Acquire::http::Pipeline-Depth)
@@ -125,6 +128,24 @@ apt_sources:
=Y2oI
-----END PGP PUBLIC KEY BLOCK-----
+## apt config via system_info:
+# under the 'system_info', you can further customize cloud-init's interaction
+# with apt.
+# system_info:
+# apt_get_command: [command, argument, argument]
+# apt_get_upgrade_subcommand: dist-upgrade
+#
+# apt_get_command:
+# To specify a different 'apt-get' command, set 'apt_get_command'.
+# This must be a list, and the subcommand (update, upgrade) is appended to it.
+# default is:
+# ['apt-get', '--option=Dpkg::Options::=--force-confold',
+# '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet']
+#
+# apt_get_upgrade_subcommand:
+#   Specify a different subcommand for 'upgrade'. The default is 'dist-upgrade'.
+# This is the subcommand that is invoked if package_upgrade is set to true above.
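+#
+# As an illustrative sketch (aptitude and 'full-upgrade' here are hypothetical
+# substitutions, not defaults), the system_info override could look like:
+#   system_info:
+#     apt_get_command: ['aptitude', '--assume-yes', '--quiet']
+#     apt_get_upgrade_subcommand: full-upgrade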
+
# Install additional packages on first boot
#
# Default: none
diff --git a/doc/merging.rst b/doc/merging.rst
new file mode 100644
index 00000000..d4d5cd05
--- /dev/null
+++ b/doc/merging.rst
@@ -0,0 +1,188 @@
+Overview
+--------
+
+This was added because it has been a common feature request that there be a
+way to specify how cloud-config yaml "dictionaries" are merged together when
+there are multiple yamls to merge (say when performing an #include).
+
+Previously the merging algorithm was very simple: it would only overwrite and
+would not append lists, merge strings, and so on. It was therefore decided to
+create a new and improved way to merge dictionaries (and their contained
+objects) together in a way that is customizable, thus allowing users who
+provide cloud-config data to determine exactly how their objects will be
+merged.
+
+For example:
+
+.. code-block:: yaml
+
+ #cloud-config (1)
+ run_cmd:
+ - bash1
+ - bash2
+
+ #cloud-config (2)
+ run_cmd:
+ - bash3
+ - bash4
+
+The previous way of merging the two objects above would result in a final
+cloud-config object that contains the following:
+
+.. code-block:: yaml
+
+ #cloud-config (merged)
+ run_cmd:
+ - bash3
+ - bash4
+
+Typically this is not what users want; instead, they would likely prefer:
+
+.. code-block:: yaml
+
+ #cloud-config (merged)
+ run_cmd:
+ - bash1
+ - bash2
+ - bash3
+ - bash4
+
+This way makes it easier to combine the various cloud-config objects you have
+into a more useful list, thus reducing duplication that would have had to
+occur in the previous method to accomplish the same result.
+
+Customizability
+---------------
+
+Since the above merging algorithm may not always be the desired merging
+algorithm (just as the previous merging algorithm was not always the preferred
+one), the concept of customizing how merging can be done was introduced through
+a new concept called 'merge classes'.
+
+A merge class is a class definition which provides functions that can be used
+to merge a given type with another given type.
+
+An example of one of these merging classes is the following:
+
+.. code-block:: python
+
+ class Merger(object):
+ def __init__(self, merger, opts):
+ self._merger = merger
+ self._overwrite = 'overwrite' in opts
+
+ # This merging algorithm will attempt to merge with
+ # another dictionary, on encountering any other type of object
+ # it will not merge with said object, but will instead return
+ # the original value
+ #
+ # On encountering a dictionary, it will create a new dictionary
+ # composed of the original and the one to merge with, if 'overwrite'
+ # is enabled then keys that exist in the original will be overwritten
+ # by keys in the one to merge with (and associated values). Otherwise
+ # if not in overwrite mode the 2 conflicting keys themselves will
+ # be merged.
+ def _on_dict(self, value, merge_with):
+ if not isinstance(merge_with, (dict)):
+ return value
+ merged = dict(value)
+ for (k, v) in merge_with.items():
+ if k in merged:
+ if not self._overwrite:
+ merged[k] = self._merger.merge(merged[k], v)
+ else:
+ merged[k] = v
+ else:
+ merged[k] = v
+ return merged
+
+As you can see there is a '_on_dict' method here that will be given a source value
+and a value to merge with. The result will be the merged object. This code itself
+is called by another merging class which 'directs' the merging to happen by
+analyzing the types of the objects to merge and attempting to find a known
+merger that can handle that type. I will avoid pasting that here, but it can be found
+in the `mergers/__init__.py` file (see `LookupMerger` and `UnknownMerger`).
+
+So following the typical cloud-init way of allowing source code to be downloaded
+and used dynamically, it is possible for users to inject their own merging files
+to handle specific types of merging as they choose (the basic ones included will
+handle lists, dicts, and strings). Note how each merger can have options associated
+with it which affect how the merging is performed; for example, a dictionary merger
+can be told to overwrite instead of attempting to merge, or a string merger can be
+told to append strings instead of discarding other strings to merge with.
+
+How to activate
+---------------
+
+There are a few ways to activate the merging algorithms, and to customize them
+for your own usage.
+
+1. The first way involves the usage of MIME messages in cloud-init to specify
+   multipart documents (this is one way in which multiple cloud-configs are joined
+   together into a single cloud-config). Two new headers are looked for, both
+   of which can define the way merging is done (the first header to exist wins).
+   These new headers (in lookup order) are 'Merge-Type' and 'X-Merge-Type'. The value
+   should be a string which will satisfy the new merging format definition (see
+   below for this format).
+2. The second way is actually specifying the merge-type in the body of the
+   cloud-config dictionary. There are 2 ways to specify this, either as a string
+   or as a dictionary (see format below). The keys that are looked up for this
+   definition are the following (in order): 'merge_how', 'merge_type'.
+
+String format
+*************
+
+The string format that is expected is the following.
+
+::
+
+ classname1(option1,option2)+classname2(option3,option4)....
+
+Each class name there will be matched against the class names used when looking
+for a class that can perform the merge, and the options provided will be passed
+to that class when it is constructed.
+
+For example, the default string that is used when none is provided is the following:
+
+::
+
+ list()+dict()+str()
+
+Dictionary format
+*****************
+
+In cases where it is more convenient, a dictionary can be used to specify the
+same information as the string format (i.e. option #2 above), for example:
+
+.. code-block:: python
+
+ {'merge_how': [{'name': 'list', 'settings': ['extend']},
+ {'name': 'dict', 'settings': []},
+ {'name': 'str', 'settings': ['append']}]}
+
+This is the dictionary equivalent of the string format; for example, it
+corresponds to the string ``list(extend)+dict()+str(append)``.
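+
+As an illustrative sketch, that same string could be embedded directly in a
+cloud-config body via the 'merge_how' key described above:
+
+.. code-block:: yaml
+
+    #cloud-config
+    merge_how: 'list(extend)+dict()+str(append)'
+    run_cmd:
+      - bash1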
+
+Specifying multiple types and its effect
+----------------------------------------
+
+Now you may be asking yourself, if I specify a merge-type header or dictionary
+for every cloud-config that I provide, what exactly happens?
+
+The answer is that when merging, a stack of 'merging classes' is kept. The
+first set on that stack is the default merging classes; this set of mergers
+will be used when the first cloud-config is merged with the initial empty
+cloud-config dictionary. If the cloud-config that was just merged provided a
+set of merging classes (via the above formats) then those merging classes will
+be pushed onto the stack. If there is then a second cloud-config to be merged,
+the merging classes from the previously merged cloud-config will be used (not
+the default ones), and so on. This way a cloud-config can decide how it will
+merge with a cloud-config dictionary coming after it.
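+
+For example, a sketch of two multipart cloud-configs (contents are purely
+illustrative):
+
+.. code-block:: yaml
+
+    #cloud-config (part 1)
+    merge_how: 'list(extend)+dict()+str()'
+    run_cmd:
+      - bash1
+
+    #cloud-config (part 2)
+    run_cmd:
+      - bash2
+
+Part 1 is merged into the initial empty dictionary using the default mergers.
+Because part 1 declares its own mergers, part 2 is then merged into that result
+using the mergers part 1 provided, so the merged run_cmd contains both bash1
+and bash2.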
+
+Other uses
+----------
+
+The default merging algorithm for merging 'conf.d' yaml files (which form an
+initial yaml config for cloud-init) was also changed to use this mechanism, so
+its full benefits (and customization) can be used there as well. Other places
+that used the previous merging are now similarly extensible (metadata merging,
+for example).
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 87fc40ab..c9ae79f4 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -17,13 +17,13 @@ from cloudinit import version
# General information about the project.
project = 'Cloud-Init'
-# -- General configuration -----------------------------------------------------
+# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
]
@@ -55,7 +55,7 @@ exclude_patterns = []
# output. They are ignored by default.
show_authors = False
-# -- Options for HTML output ---------------------------------------------------
+# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 619bb5dc..fe04b1a9 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -24,6 +24,7 @@ Summary
topics/examples
topics/datasources
topics/modules
+ topics/merging
topics/moreinfo
topics/hacking
diff --git a/doc/rtd/static/logo.svg b/doc/rtd/static/logo.svg
index b22ce2a0..b22ce2a0 100755..100644
--- a/doc/rtd/static/logo.svg
+++ b/doc/rtd/static/logo.svg
diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst
new file mode 100644
index 00000000..8a03f3c7
--- /dev/null
+++ b/doc/rtd/topics/merging.rst
@@ -0,0 +1,5 @@
+=========
+Merging
+=========
+
+.. include:: ../../merging.rst
diff --git a/doc/sources/azure/README.rst b/doc/sources/azure/README.rst
new file mode 100644
index 00000000..8239d1fa
--- /dev/null
+++ b/doc/sources/azure/README.rst
@@ -0,0 +1,134 @@
+================
+Azure Datasource
+================
+
+This datasource finds metadata and user-data from the Azure cloud platform.
+
+Azure Platform
+--------------
+The Azure cloud platform provides initial data to an instance via an attached
+CD formatted in UDF. That CD contains an 'ovf-env.xml' file that provides some
+information. Additional information is obtained via interaction with the
+"endpoint". The IP address of the endpoint is advertised to the instance
+inside of DHCP option 245. On Ubuntu, that can be seen in
+/var/lib/dhcp/dhclient.eth0.leases as a colon-delimited hex value (example:
+``option unknown-245 64:41:60:82;`` is 100.65.96.130)
+
+walinuxagent
+------------
+In order to operate correctly, cloud-init needs walinuxagent to provide much
+of the interaction with Azure. In addition to "provisioning" code, walinuxagent
+is a long running daemon that handles, among other things, the following:
+
+- generate an x509 certificate and send that to the endpoint
+
+waagent.conf config
+~~~~~~~~~~~~~~~~~~~
+In order to use waagent.conf with cloud-init, the following settings are
+recommended. Other values can be changed or set to the defaults.
+
+ ::
+
+   # disabling provisioning turns off all 'Provisioning.*' functionality
+ Provisioning.Enabled=n
+ # this is currently not handled by cloud-init, so let walinuxagent do it.
+ ResourceDisk.Format=y
+ ResourceDisk.MountPoint=/mnt
+
+
+Userdata
+--------
+Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init
+expects that user-data will be provided as a base64 encoded value inside the
+text child of an element named ``UserData`` or ``CustomData``, which is a direct
+child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``).
+If both ``UserData`` and ``CustomData`` are provided, the behavior is undefined
+as to which will be selected.
+
+In the example below, the user-data provided is 'this is my userdata', and the
+datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``.
+That agent command will take effect as if it were specified in the system config.
+
+Example:
+
+.. code::
+
+ <wa:ProvisioningSection>
+ <wa:Version>1.0</wa:Version>
+ <LinuxProvisioningConfigurationSet
+ xmlns="http://schemas.microsoft.com/windowsazure"
+ xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
+ <ConfigurationSetType>LinuxProvisioningConfiguration</ConfigurationSetType>
+ <HostName>myHost</HostName>
+ <UserName>myuser</UserName>
+ <UserPassword/>
+    <CustomData>dGhpcyBpcyBteSB1c2VyZGF0YQ==</CustomData>
+ <dscfg>eyJhZ2VudF9jb21tYW5kIjogWyJzdGFydCIsICJ3YWxpbnV4YWdlbnQiXX0=</dscfg>
+ <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication>
+ <SSH>
+ <PublicKeys>
+ <PublicKey>
+ <Fingerprint>6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7</Fingerprint>
+ <Path>this-value-unused</Path>
+ </PublicKey>
+ </PublicKeys>
+ </SSH>
+ </LinuxProvisioningConfigurationSet>
+ </wa:ProvisioningSection>
+
+Configuration
+-------------
+Configuration for the datasource can be read from the system config or set
+via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`. Content in
+the dscfg node is expected to be base64 encoded yaml content, and it will be
+merged into the 'datasource: Azure' entry.
+
+The '``hostname_bounce: command``' entry can be either the literal string
+'builtin' or a command to execute. The command will be invoked after the
+hostname is set, and will have the 'interface' in its environment. If
+``set_hostname`` is not true, then ``hostname_bounce`` will be ignored.
+
+An example might be:
+
+::
+
+  command: ["sh", "-c", "killall dhclient; dhclient $interface"]
+
+.. code::
+
+ datasource:
+   Azure:
+ agent_command: [service, walinuxagent, start]
+ set_hostname: True
+ hostname_bounce:
+ # the name of the interface to bounce
+ interface: eth0
+ # policy can be 'on', 'off' or 'force'
+ policy: on
+     # the command used to bounce the network interface;
+     # the literal string 'builtin' means use the built-in method
+     command: "builtin"
+     hostname_command: "hostname"
+
+hostname
+--------
+When the user launches an instance, they provide a hostname for that instance.
+The hostname is provided to the instance in the ovf-env.xml file as
+``HostName``.
+
+Whatever value the instance provides in its DHCP request will resolve in the
+domain returned in the 'search' request.
+
+The interesting issue is that a generic image will already have a hostname
+configured. The Ubuntu cloud images have 'ubuntu' as the hostname of the
+system, and the initial DHCP request on eth0 is not guaranteed to occur after
+the datasource code has been run. So, on first boot, that initial value will
+be sent in the DHCP request and *that* value will resolve.
+
+In order to make the ``HostName`` provided in the ovf-env.xml resolve, a
+DHCP request must be made with the new value. Walinuxagent (in its current
+version) handles this by polling the state of the hostname and bouncing
+('``ifdown eth0; ifup eth0``') the network interface if it sees that a change
+has been made.
+
+cloud-init handles this by setting the hostname in the DataSource's 'get_data'
+method via '``hostname $HostName``', and then bouncing the interface. This
+behavior can be configured or disabled in the datasource config. See
+'Configuration' above.
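+
+As a hedged sketch (reusing the datasource config format shown in
+'Configuration' above), the bounce could be turned off while still setting
+the hostname:
+
+.. code::
+
+  datasource:
+    Azure:
+      set_hostname: True
+      hostname_bounce:
+        policy: off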
diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst
new file mode 100644
index 00000000..fd4e496d
--- /dev/null
+++ b/doc/sources/smartos/README.rst
@@ -0,0 +1,72 @@
+==================
+SmartOS Datasource
+==================
+
+This datasource finds metadata and user-data from the SmartOS virtualization
+platform (i.e. Joyent).
+
+SmartOS Platform
+----------------
+The SmartOS virtualization platform provides meta-data to the instance via the
+second serial console. On Linux, this is /dev/ttyS1. The data is provided via
+a simple protocol: something queries for the user-data, and the console
+responds with the status and, if "SUCCESS", returns the data until a single
+".\n" is received.
+
+New versions of the SmartOS tooling will include support for base64 encoded data.
+
+Userdata
+--------
+
+In SmartOS parlance, user-data is actually meta-data. This user-data can be
+provided as key-value pairs.
+
+Cloud-init supports reading the traditional meta-data fields supported by the
+SmartOS tools. These are:
+ * root_authorized_keys
+ * hostname
+ * enable_motd_sys_info
+ * iptables_disable
+
+Note: At this time iptables_disable and enable_motd_sys_info are read but
+ are not actioned.
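+
+A minimal illustrative sketch of these fields as metadata (the values are
+made up):
+
+::
+
+    hostname: my-smartos-host
+    root_authorized_keys: ssh-rsa AAAAB3Nz... user@example
+    enable_motd_sys_info: true
+    iptables_disable: false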
+
+user-script
+-----------
+
+SmartOS traditionally supports sending over a user-script for execution at the
+rc.local level. Cloud-init supports running user-scripts as if they were
+cloud-init user-data. In this sense, anything with a shell interpreter
+directive will run.
+
+user-data and user-script
+-------------------------
+
+In the event that a user defines the meta-data key of "user-data", it will
+always supersede any user-script data. This is for consistency.
+
+base64
+------
+
+The following are exempt from base64 encoding, owing to the fact that they
+are provided by SmartOS:
+ * root_authorized_keys
+ * enable_motd_sys_info
+ * iptables_disable
+
+This list can be changed through the system config variable 'no_base64_decode'.
+
+This means that user-script and user-data as well as other values can be
+base64 encoded. Since Cloud-init can only guess as to whether or not something
+is truly base64 encoded, the following meta-data keys are hints as to whether
+or not to base64 decode something:
+ * base64_all: Except for excluded keys, attempt to base64 decode
+ the values. If the value fails to decode properly, it will be
+   returned in its text form.
+ * base64_keys: A comma delimited list of which keys are base64 encoded.
+ * b64-<key>:
+   for any key, if there exists an entry in the metadata for 'b64-<key>',
+   then 'b64-<key>' is expected to be a plaintext boolean indicating whether
+ or not its value is encoded.
+ * no_base64_decode: This is a configuration setting
+ (i.e. /etc/cloud/cloud.cfg.d) that sets which values should not be
+ base64 decoded.
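+
+As a purely illustrative sketch (the values below are made up, not defaults),
+these hints might appear in the metadata like this:
+
+::
+
+    # only the 'user-data' value is base64 encoded
+    b64-user-data: true
+    user-data: aGVsbG8gd29ybGQK
+
+    # or, equivalently, name the encoded keys explicitly
+    base64_keys: user-data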