author:    zsdc <taras@vyos.io>  2020-09-15 17:05:20 +0300
committer: zsdc <taras@vyos.io>  2020-09-15 17:05:20 +0300
commit:    7cd260b313267dc7123cb99a75d4555e24909cca (patch)
tree:      f57f3db085a724df237ffa64b589c6bb6dd3b28f /doc
parent:    1a790ee102fd405e5c3a20a17a69ba0c118ed874 (diff)
parent:    948bd9c1fcd08346cf8ec0551d7f6c2b234e896b (diff)
download:  vyos-cloud-init-7cd260b313267dc7123cb99a75d4555e24909cca.tar.gz
download:  vyos-cloud-init-7cd260b313267dc7123cb99a75d4555e24909cca.zip
T2117: Cloud-init updated to 20.3
Merged with 20.3 tag from the upstream Cloud-init repository
Diffstat (limited to 'doc')
-rw-r--r--  doc/examples/cloud-config-apt.txt                |  43
-rw-r--r--  doc/examples/cloud-config-boot-cmds.txt          |   4
-rw-r--r--  doc/examples/cloud-config-chef-oneiric.txt       | 115
-rw-r--r--  doc/examples/cloud-config-chef.txt               |  87
-rw-r--r--  doc/examples/cloud-config-datasources.txt        |   8
-rw-r--r--  doc/examples/cloud-config-disk-setup.txt         | 331
-rw-r--r--  doc/examples/cloud-config-landscape.txt          |   1
-rw-r--r--  doc/examples/cloud-config-mcollective.txt        |  82
-rw-r--r--  doc/examples/cloud-config-mount-points.txt       |   6
-rw-r--r--  doc/examples/cloud-config-phone-home.txt         |  10
-rw-r--r--  doc/examples/cloud-config-power-state.txt        |  10
-rw-r--r--  doc/examples/cloud-config-puppet.txt             |  88
-rw-r--r--  doc/examples/cloud-config-reporting.txt          |  22
-rw-r--r--  doc/examples/cloud-config-rh_subscription.txt    |  48
-rw-r--r--  doc/examples/cloud-config-rsyslog.txt            |  43
-rw-r--r--  doc/examples/cloud-config-user-groups.txt        |  29
-rw-r--r--  doc/examples/cloud-config-vendor-data.txt        |   4
-rw-r--r--  doc/examples/cloud-config-write-files.txt        |  42
-rw-r--r--  doc/examples/cloud-config-yum-repo.txt           |  24
-rw-r--r--  doc/examples/cloud-config.txt                    |  55
-rw-r--r--  doc/examples/kernel-cmdline.txt                  |  13
-rw-r--r--  doc/rtd/conf.py                                  |   2
-rw-r--r--  doc/rtd/index.rst                                |   1
-rw-r--r--  doc/rtd/topics/availability.rst                  |  19
-rw-r--r--  doc/rtd/topics/boot.rst                          |   4
-rw-r--r--  doc/rtd/topics/cli.rst                           |  16
-rw-r--r--  doc/rtd/topics/code_review.rst                   | 256
-rw-r--r--  doc/rtd/topics/datasources/azure.rst             |  16
-rw-r--r--  doc/rtd/topics/datasources/cloudstack.rst        |  26
-rw-r--r--  doc/rtd/topics/datasources/ec2.rst               |  27
-rw-r--r--  doc/rtd/topics/datasources/maas.rst              |   2
-rw-r--r--  doc/rtd/topics/datasources/nocloud.rst           |  20
-rw-r--r--  doc/rtd/topics/datasources/openstack.rst         |  17
-rw-r--r--  doc/rtd/topics/datasources/ovf.rst               |  19
-rw-r--r--  doc/rtd/topics/faq.rst                           |  21
-rw-r--r--  doc/rtd/topics/format.rst                        |  55
-rw-r--r--  doc/rtd/topics/instancedata.rst                  | 363
-rw-r--r--  doc/rtd/topics/modules.rst                       |   1
-rw-r--r--  doc/rtd/topics/network-config-format-v2.rst      |  10
-rw-r--r--  doc/rtd/topics/network-config.rst                |  26
-rw-r--r--  doc/rtd/topics/tests.rst                         |  66
41 files changed, 1265 insertions, 767 deletions
diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt
index ff8206f6..004894b7 100644
--- a/doc/examples/cloud-config-apt.txt
+++ b/doc/examples/cloud-config-apt.txt
@@ -1,3 +1,4 @@
+#cloud-config
# apt_pipelining (configure Acquire::http::Pipeline-Depth)
# Default: disables HTTP pipelining. Certain web servers, such
# as S3 do not pipeline properly (LP: #948461).
@@ -141,7 +142,7 @@ apt:
# as above, allowing to have one config for different per arch mirrors
# security is optional, if not defined it is set to the same value as primary
security:
- uri: http://security.ubuntu.com/ubuntu
+ - uri: http://security.ubuntu.com/ubuntu
# If search_dns is set for security the searched pattern is:
# <distro>-security-mirror
@@ -222,19 +223,19 @@ apt:
# This allows merging between multiple input files than a list like:
# cloud-config1
# sources:
- # s1: {'key': 'key1', 'source': 'source1'}
+ # s1: {'key': 'key1', 'source': 'source1'}
# cloud-config2
# sources:
- # s2: {'key': 'key2'}
- # s1: {'keyserver': 'foo'}
+ # s2: {'key': 'key2'}
+ # s1: {'keyserver': 'foo'}
# This would be merged to
# sources:
- # s1:
- # keyserver: foo
- # key: key1
- # source: source1
- # s2:
- # key: key2
+ # s1:
+ # keyserver: foo
+ # key: key1
+ # source: source1
+ # s2:
+ # key: key2
#
# The following examples number the subfeatures per sources entry to ease
# identification in discussions.
@@ -314,15 +315,15 @@ apt:
# As with keyid's this can be specified with or without some actual source
# content.
key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK-----
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: SKS 1.0.10
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: SKS 1.0.10
- mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
- qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
- 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
- IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
- 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
- t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
- uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
- =Y2oI
- -----END PGP PUBLIC KEY BLOCK-----
+ mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6
+ qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj
+ 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y
+ IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H
+ 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP
+ t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o
+ uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey
+ =Y2oI
+ -----END PGP PUBLIC KEY BLOCK-----
diff --git a/doc/examples/cloud-config-boot-cmds.txt b/doc/examples/cloud-config-boot-cmds.txt
index 84e487a5..f9357b52 100644
--- a/doc/examples/cloud-config-boot-cmds.txt
+++ b/doc/examples/cloud-config-boot-cmds.txt
@@ -11,5 +11,5 @@
# - the INSTANCE_ID variable will be set to the current instance id.
# - you can use 'cloud-init-per' command to help only run once
bootcmd:
- - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts
- - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
+ - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts
+ - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ]
diff --git a/doc/examples/cloud-config-chef-oneiric.txt b/doc/examples/cloud-config-chef-oneiric.txt
index 75c9aeed..241fbf9b 100644
--- a/doc/examples/cloud-config-chef-oneiric.txt
+++ b/doc/examples/cloud-config-chef-oneiric.txt
@@ -13,73 +13,74 @@
# Key from http://apt.opscode.com/packages@opscode.com.gpg.key
apt:
sources:
- - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
- key: |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v1.4.9 (GNU/Linux)
+ source1:
+ source: "deb http://apt.opscode.com/ $RELEASE-0.10 main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1.4.9 (GNU/Linux)
- mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
- twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
- dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
- JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
- ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
- XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
- DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
- sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
- Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
- YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
- CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
- +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
- lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
- DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
- wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
- EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
- w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
- AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
- QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
- Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
- 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
- Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
- zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
- DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
- 0GLl8EkfA8uhluM=
- =zKAm
- -----END PGP PUBLIC KEY BLOCK-----
+ mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu
+ twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99
+ dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC
+ JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W
+ ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I
+ XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe
+ DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm
+ sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO
+ Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ
+ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG
+ CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K
+ +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR
+ lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh
+ DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu
+ wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx
+ EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g
+ w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8
+ AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN
+ QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X
+ Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ
+ 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V
+ Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL
+ zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb
+ DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG
+ 0GLl8EkfA8uhluM=
+ =zKAm
+ -----END PGP PUBLIC KEY BLOCK-----
chef:
- # 11.10 will fail if install_type is "gems" (LP: #960576)
- install_type: "packages"
+ # 11.10 will fail if install_type is "gems" (LP: #960576)
+ install_type: "packages"
- # Chef settings
- server_url: "https://chef.yourorg.com:4000"
+ # Chef settings
+ server_url: "https://chef.yourorg.com:4000"
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
+ # Node Name
+ # Defaults to the instance-id if not present
+ node_name: "your-node-name"
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
+ # Environment
+ # Defaults to '_default' if not present
+ environment: "production"
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
+ # Default validation name is chef-validator
+ validation_name: "yourorg-validator"
- # value of validation_cert is not used if validation_key defined,
- # but variable needs to be defined (LP: #960547)
- validation_cert: "unused"
- validation_key: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json
- run_list:
- - "recipe[apache2]"
- - "role[db]"
+ # value of validation_cert is not used if validation_key defined,
+ # but variable needs to be defined (LP: #960547)
+ validation_cert: "unused"
+ validation_key: |
+ -----BEGIN RSA PRIVATE KEY-----
+ YOUR-ORGS-VALIDATION-KEY-HERE
+ -----END RSA PRIVATE KEY-----
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
+ # A run list for a first boot json
+ run_list:
+ - "recipe[apache2]"
+ - "role[db]"
+
+ # Specify a list of initial attributes used by the cookbooks
+ initial_attributes:
apache:
prefork:
maxclients: 100
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index 2320e01a..bb4b058c 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -52,55 +52,58 @@ apt:
chef:
- # Valid values are 'gems' and 'packages' and 'omnibus'
- install_type: "packages"
-
- # Boolean: run 'install_type' code even if chef-client
- # appears already installed.
- force_install: false
-
- # Chef settings
- server_url: "https://chef.yourorg.com"
-
- # Node Name
- # Defaults to the instance-id if not present
- node_name: "your-node-name"
-
- # Environment
- # Defaults to '_default' if not present
- environment: "production"
-
- # Default validation name is chef-validator
- validation_name: "yourorg-validator"
- # if validation_cert's value is "system" then it is expected
- # that the file already exists on the system.
- validation_cert: |
- -----BEGIN RSA PRIVATE KEY-----
- YOUR-ORGS-VALIDATION-KEY-HERE
- -----END RSA PRIVATE KEY-----
-
- # A run list for a first boot json, an example (not required)
- run_list:
- - "recipe[apache2]"
- - "role[db]"
-
- # Specify a list of initial attributes used by the cookbooks
- initial_attributes:
+ # Valid values are 'accept' and 'accept-no-persist'
+ chef_license: "accept"
+
+ # Valid values are 'gems' and 'packages' and 'omnibus'
+ install_type: "packages"
+
+ # Boolean: run 'install_type' code even if chef-client
+ # appears already installed.
+ force_install: false
+
+ # Chef settings
+ server_url: "https://chef.yourorg.com"
+
+ # Node Name
+ # Defaults to the instance-id if not present
+ node_name: "your-node-name"
+
+ # Environment
+ # Defaults to '_default' if not present
+ environment: "production"
+
+ # Default validation name is chef-validator
+ validation_name: "yourorg-validator"
+ # if validation_cert's value is "system" then it is expected
+ # that the file already exists on the system.
+ validation_cert: |
+ -----BEGIN RSA PRIVATE KEY-----
+ YOUR-ORGS-VALIDATION-KEY-HERE
+ -----END RSA PRIVATE KEY-----
+
+ # A run list for a first boot json, an example (not required)
+ run_list:
+ - "recipe[apache2]"
+ - "role[db]"
+
+ # Specify a list of initial attributes used by the cookbooks
+ initial_attributes:
apache:
prefork:
maxclients: 100
keepalive: "off"
- # if install_type is 'omnibus', change the url to download
- omnibus_url: "https://www.chef.io/chef/install.sh"
+ # if install_type is 'omnibus', change the url to download
+ omnibus_url: "https://www.chef.io/chef/install.sh"
- # if install_type is 'omnibus', pass pinned version string
- # to the install script
- omnibus_version: "12.3.0"
+ # if install_type is 'omnibus', pass pinned version string
+ # to the install script
+ omnibus_version: "12.3.0"
- # If encrypted data bags are used, the client needs to have a secrets file
- # configured to decrypt them
- encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret"
+ # If encrypted data bags are used, the client needs to have a secrets file
+ # configured to decrypt them
+ encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret"
# Capture all subprocess output into a logfile
# Useful for troubleshooting cloud-init issues
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 52a2476b..13bb687c 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -1,3 +1,5 @@
+#cloud-config
+
# Documentation on data sources configuration options
datasource:
# Ec2
@@ -38,10 +40,10 @@ datasource:
# these are optional, but allow you to basically provide a datasource
# right here
user-data: |
- # This is the user-data verbatim
+ # This is the user-data verbatim
meta-data:
- instance-id: i-87018aed
- local-hostname: myhost.internal
+ instance-id: i-87018aed
+ local-hostname: myhost.internal
Azure:
agent_command: [service, walinuxagent, start]
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index cac44d8c..5c6de77e 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -1,3 +1,4 @@
+#cloud-config
# Cloud-init supports the creation of simple partition tables and file systems
# on devices.
@@ -6,47 +7,47 @@
# (Not implemented yet, but provided for future documentation)
disk_setup:
- ephmeral0:
- table_type: 'mbr'
- layout: True
- overwrite: False
+ ephmeral0:
+ table_type: 'mbr'
+ layout: True
+ overwrite: False
fs_setup:
- - label: None,
- filesystem: ext3
- device: ephemeral0
- partition: auto
+ - label: None,
+ filesystem: ext3
+ device: ephemeral0
+ partition: auto
# Default disk definitions for Microsoft Azure
# ------------------------------------------
device_aliases: {'ephemeral0': '/dev/sdb'}
disk_setup:
- ephemeral0:
- table_type: mbr
- layout: True
- overwrite: False
+ ephemeral0:
+ table_type: mbr
+ layout: True
+ overwrite: False
fs_setup:
- - label: ephemeral0
- filesystem: ext4
- device: ephemeral0.1
- replace_fs: ntfs
+ - label: ephemeral0
+ filesystem: ext4
+ device: ephemeral0.1
+ replace_fs: ntfs
# Data disks definitions for Microsoft Azure
# ------------------------------------------
disk_setup:
- /dev/disk/azure/scsi1/lun0:
- table_type: gpt
- layout: True
- overwrite: True
+ /dev/disk/azure/scsi1/lun0:
+ table_type: gpt
+ layout: True
+ overwrite: True
fs_setup:
- - device: /dev/disk/azure/scsi1/lun0
- partition: 1
- filesystem: ext4
+ - device: /dev/disk/azure/scsi1/lun0
+ partition: 1
+ filesystem: ext4
# Default disk definitions for SmartOS
@@ -54,15 +55,15 @@ fs_setup:
device_aliases: {'ephemeral0': '/dev/vdb'}
disk_setup:
- ephemeral0:
- table_type: mbr
- layout: False
- overwrite: False
+ ephemeral0:
+ table_type: mbr
+ layout: False
+ overwrite: False
fs_setup:
- - label: ephemeral0
- filesystem: ext4
- device: ephemeral0.0
+ - label: ephemeral0
+ filesystem: ext4
+ device: ephemeral0.0
# Caveat for SmartOS: if ephemeral disk is not defined, then the disk will
# not be automatically added to the mounts.
@@ -77,87 +78,87 @@ fs_setup:
# The disk_setup directive instructs Cloud-init to partition a disk. The format is:
disk_setup:
- ephmeral0:
- table_type: 'mbr'
- layout: 'auto'
- /dev/xvdh:
- table_type: 'mbr'
- layout:
- - 33
- - [33, 82]
- - 33
- overwrite: True
+ ephmeral0:
+ table_type: 'mbr'
+ layout: 'auto'
+ /dev/xvdh:
+ table_type: 'mbr'
+ layout:
+ - 33
+ - [33, 82]
+ - 33
+ overwrite: True
# The format is a list of dicts of dicts. The first value is the name of the
# device and the subsequent values define how to create and layout the
# partition.
# The general format is:
-# disk_setup:
-# <DEVICE>:
-# table_type: 'mbr'
-# layout: <LAYOUT|BOOL>
-# overwrite: <BOOL>
+# disk_setup:
+# <DEVICE>:
+# table_type: 'mbr'
+# layout: <LAYOUT|BOOL>
+# overwrite: <BOOL>
#
# Where:
-# <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
-# values which are specific to the cloud. For these devices
-# Cloud-init will look up what the real devices is and then
-# use it.
+# <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
+# values which are specific to the cloud. For these devices
+# Cloud-init will look up what the real devices is and then
+# use it.
#
-# For other devices, the kernel device name is used. At this
-# time only simply kernel devices are supported, meaning
-# that device mapper and other targets may not work.
+# For other devices, the kernel device name is used. At this
+# time only simply kernel devices are supported, meaning
+# that device mapper and other targets may not work.
#
-# Note: At this time, there is no handling or setup of
-# device mapper targets.
+# Note: At this time, there is no handling or setup of
+# device mapper targets.
#
-# table_type=<TYPE>: Currently the following are supported:
-# 'mbr': default and setups a MS-DOS partition table
-# 'gpt': setups a GPT partition table
+# table_type=<TYPE>: Currently the following are supported:
+# 'mbr': default and setups a MS-DOS partition table
+# 'gpt': setups a GPT partition table
#
-# Note: At this time only 'mbr' and 'gpt' partition tables
-# are allowed. It is anticipated in the future that
-# we'll also have "RAID" to create a mdadm RAID.
+# Note: At this time only 'mbr' and 'gpt' partition tables
+# are allowed. It is anticipated in the future that
+# we'll also have "RAID" to create a mdadm RAID.
#
-# layout={...}: The device layout. This is a list of values, with the
-# percentage of disk that partition will take.
-# Valid options are:
-# [<SIZE>, [<SIZE>, <PART_TYPE]]
+# layout={...}: The device layout. This is a list of values, with the
+# percentage of disk that partition will take.
+# Valid options are:
+# [<SIZE>, [<SIZE>, <PART_TYPE]]
#
-# Where <SIZE> is the _percentage_ of the disk to use, while
-# <PART_TYPE> is the numerical value of the partition type.
+# Where <SIZE> is the _percentage_ of the disk to use, while
+# <PART_TYPE> is the numerical value of the partition type.
#
-# The following setups two partitions, with the first
-# partition having a swap label, taking 1/3 of the disk space
-# and the remainder being used as the second partition.
-# /dev/xvdh':
-# table_type: 'mbr'
-# layout:
-# - [33,82]
-# - 66
-# overwrite: True
+# The following setups two partitions, with the first
+# partition having a swap label, taking 1/3 of the disk space
+# and the remainder being used as the second partition.
+# /dev/xvdh':
+# table_type: 'mbr'
+# layout:
+# - [33,82]
+# - 66
+# overwrite: True
#
-# When layout is "true" it means single partition the entire
-# device.
+# When layout is "true" it means single partition the entire
+# device.
#
-# When layout is "false" it means don't partition or ignore
-# existing partitioning.
+# When layout is "false" it means don't partition or ignore
+# existing partitioning.
#
-# If layout is set to "true" and overwrite is set to "false",
-# it will skip partitioning the device without a failure.
+# If layout is set to "true" and overwrite is set to "false",
+# it will skip partitioning the device without a failure.
#
-# overwrite=<BOOL>: This describes whether to ride with saftey's on and
-# everything holstered.
+# overwrite=<BOOL>: This describes whether to ride with saftey's on and
+# everything holstered.
#
-# 'false' is the default, which means that:
-# 1. The device will be checked for a partition table
-# 2. The device will be checked for a file system
-# 3. If either a partition of file system is found, then
-# the operation will be _skipped_.
+# 'false' is the default, which means that:
+# 1. The device will be checked for a partition table
+# 2. The device will be checked for a file system
+# 3. If either a partition of file system is found, then
+# the operation will be _skipped_.
#
-# 'true' is cowboy mode. There are no checks and things are
-# done blindly. USE with caution, you can do things you
-# really, really don't want to do.
+# 'true' is cowboy mode. There are no checks and things are
+# done blindly. USE with caution, you can do things you
+# really, really don't want to do.
#
#
# fs_setup: Setup the file system
@@ -166,101 +167,101 @@ disk_setup:
# fs_setup describes the how the file systems are supposed to look.
fs_setup:
- - label: ephemeral0
- filesystem: 'ext3'
- device: 'ephemeral0'
- partition: 'auto'
- - label: mylabl2
- filesystem: 'ext4'
- device: '/dev/xvda1'
- - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
- label: mylabl3
- filesystem: 'btrfs'
- device: '/dev/xvdh'
+ - label: ephemeral0
+ filesystem: 'ext3'
+ device: 'ephemeral0'
+ partition: 'auto'
+ - label: mylabl2
+ filesystem: 'ext4'
+ device: '/dev/xvda1'
+ - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
+ label: mylabl3
+ filesystem: 'btrfs'
+ device: '/dev/xvdh'
# The general format is:
-# fs_setup:
-# - label: <LABEL>
-# filesystem: <FS_TYPE>
-# device: <DEVICE>
-# partition: <PART_VALUE>
-# overwrite: <OVERWRITE>
-# replace_fs: <FS_TYPE>
+# fs_setup:
+# - label: <LABEL>
+# filesystem: <FS_TYPE>
+# device: <DEVICE>
+# partition: <PART_VALUE>
+# overwrite: <OVERWRITE>
+# replace_fs: <FS_TYPE>
#
# Where:
-# <LABEL>: The file system label to be used. If set to None, no label is
-# used.
+# <LABEL>: The file system label to be used. If set to None, no label is
+# used.
#
-# <FS_TYPE>: The file system type. It is assumed that the there
-# will be a "mkfs.<FS_TYPE>" that behaves likes "mkfs". On a standard
-# Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
-# and vfat by default.
+# <FS_TYPE>: The file system type. It is assumed that the there
+# will be a "mkfs.<FS_TYPE>" that behaves likes "mkfs". On a standard
+# Ubuntu Cloud Image, this means that you have the option of ext{2,3,4},
+# and vfat by default.
#
-# <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
-# are allowed and the actual device is acquired from the cloud datasource.
-# When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
-# label as 'ephemeralX' otherwise there may be issues with the mounting
-# of the ephemeral storage layer.
+# <DEVICE>: The device name. Special names of 'ephemeralX' or 'swap'
+# are allowed and the actual device is acquired from the cloud datasource.
+# When using 'ephemeralX' (i.e. ephemeral0), make sure to leave the
+# label as 'ephemeralX' otherwise there may be issues with the mounting
+# of the ephemeral storage layer.
#
-# If you define the device as 'ephemeralX.Y' then Y will be interpetted
-# as a partition value. However, ephermalX.0 is the _same_ as ephemeralX.
+# If you define the device as 'ephemeralX.Y' then Y will be interpetted
+# as a partition value. However, ephermalX.0 is the _same_ as ephemeralX.
#
-# <PART_VALUE>:
-# Partition definitions are overwriten if you use the '<DEVICE>.Y' notation.
+# <PART_VALUE>:
+# Partition definitions are overwriten if you use the '<DEVICE>.Y' notation.
#
-# The valid options are:
-# "auto|any": tell cloud-init not to care whether there is a partition
-# or not. Auto will use the first partition that does not contain a
-# file system already. In the absence of a partition table, it will
-# put it directly on the disk.
+# The valid options are:
+# "auto|any": tell cloud-init not to care whether there is a partition
+# or not. Auto will use the first partition that does not contain a
+# file system already. In the absence of a partition table, it will
+# put it directly on the disk.
#
-# "auto": If a file system that matches the specification in terms of
-# label, type and device, then cloud-init will skip the creation of
-# the file system.
+# "auto": If a file system that matches the specification in terms of
+# label, type and device, then cloud-init will skip the creation of
+# the file system.
#
-# "any": If a file system that matches the file system type and device,
-# then cloud-init will skip the creation of the file system.
+# "any": If a file system that matches the file system type and device,
+# then cloud-init will skip the creation of the file system.
#
-# Devices are selected based on first-detected, starting with partitions
-# and then the raw disk. Consider the following:
-# NAME FSTYPE LABEL
-# xvdb
-# |-xvdb1 ext4
-# |-xvdb2
-# |-xvdb3 btrfs test
-# \-xvdb4 ext4 test
+# Devices are selected based on first-detected, starting with partitions
+# and then the raw disk. Consider the following:
+# NAME FSTYPE LABEL
+# xvdb
+# |-xvdb1 ext4
+# |-xvdb2
+# |-xvdb3 btrfs test
+# \-xvdb4 ext4 test
#
-# If you ask for 'auto', label of 'test, and file system of 'ext4'
-# then cloud-init will select the 2nd partition, even though there
-# is a partition match at the 4th partition.
+# If you ask for 'auto', label of 'test, and file system of 'ext4'
+# then cloud-init will select the 2nd partition, even though there
+# is a partition match at the 4th partition.
#
-# If you ask for 'any' and a label of 'test', then cloud-init will
-# select the 1st partition.
+# If you ask for 'any' and a label of 'test', then cloud-init will
+# select the 1st partition.
#
-# If you ask for 'auto' and don't define label, then cloud-init will
-# select the 1st partition.
+# If you ask for 'auto' and don't define label, then cloud-init will
+# select the 1st partition.
#
-# In general, if you have a specific partition configuration in mind,
-# you should define either the device or the partition number. 'auto'
-# and 'any' are specifically intended for formating ephemeral storage or
-# for simple schemes.
+# In general, if you have a specific partition configuration in mind,
+# you should define either the device or the partition number. 'auto'
+# and 'any' are specifically intended for formating ephemeral storage or
+# for simple schemes.
#
-# "none": Put the file system directly on the device.
+# "none": Put the file system directly on the device.
#
-# <NUM>: where NUM is the actual partition number.
+# <NUM>: where NUM is the actual partition number.
#
-# <OVERWRITE>: Defines whether or not to overwrite any existing
-# filesystem.
+# <OVERWRITE>: Defines whether or not to overwrite any existing
+# filesystem.
#
-# "true": Indiscriminately destroy any pre-existing file system. Use at
-# your own peril.
+# "true": Indiscriminately destroy any pre-existing file system. Use at
+# your own peril.
#
-# "false": If an existing file system exists, skip the creation.
+# "false": If an existing file system exists, skip the creation.
#
-# <REPLACE_FS>: This is a special directive, used for Microsoft Azure that
-# instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
-# unless you define a label, this requires the use of the 'any' partition
-# directive.
+# <REPLACE_FS>: This is a special directive, used for Microsoft Azure that
+# instructs cloud-init to replace a file system of <FS_TYPE>. NOTE:
+# unless you define a label, this requires the use of the 'any' partition
+# directive.
#
# Behavior Caveat: The default behavior is to _check_ if the file system exists.
-# If a file system matches the specification, then the operation is a no-op.
+# If a file system matches the specification, then the operation is a no-op.
diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt
index d7ff8ef8..88be57ce 100644
--- a/doc/examples/cloud-config-landscape.txt
+++ b/doc/examples/cloud-config-landscape.txt
@@ -1,3 +1,4 @@
+#cloud-config
# Landscape-client configuration
#
# Anything under the top 'landscape: client' entry
diff --git a/doc/examples/cloud-config-mcollective.txt b/doc/examples/cloud-config-mcollective.txt
index 67735682..a701616a 100644
--- a/doc/examples/cloud-config-mcollective.txt
+++ b/doc/examples/cloud-config-mcollective.txt
@@ -5,45 +5,45 @@
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
mcollective:
- # Every key present in the conf object will be added to server.cfg:
- # key: value
- #
- # For example the configuration below will have the following key
- # added to server.cfg:
- # plugin.stomp.host: dbhost
- conf:
- plugin.stomp.host: dbhost
- # This will add ssl certs to mcollective
- # WARNING WARNING WARNING
- # The ec2 metadata service is a network service, and thus is readable
- # by non-root users on the system (ie: 'ec2metadata --user-data')
- # If you want security for this, please use include-once + SSL urls
- public-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
- private-cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
+ # Every key present in the conf object will be added to server.cfg:
+ # key: value
+ #
+ # For example the configuration below will have the following key
+ # added to server.cfg:
+ # plugin.stomp.host: dbhost
+ conf:
+ plugin.stomp.host: dbhost
+ # This will add ssl certs to mcollective
+ # WARNING WARNING WARNING
+ # The ec2 metadata service is a network service, and thus is readable
+ # by non-root users on the system (ie: 'ec2metadata --user-data')
+ # If you want security for this, please use include-once + SSL urls
+ public-cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
+ private-cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
diff --git a/doc/examples/cloud-config-mount-points.txt b/doc/examples/cloud-config-mount-points.txt
index bce28bf8..43f80ec9 100644
--- a/doc/examples/cloud-config-mount-points.txt
+++ b/doc/examples/cloud-config-mount-points.txt
@@ -41,6 +41,6 @@ mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ]
# swap can also be set up by the 'mounts' module
# default is to not create any swap files, because 'size' is set to 0
swap:
- filename: /swap.img
- size: "auto" # or size in bytes
- maxsize: size in bytes
+ filename: /swap.img
+ size: "auto" # or size in bytes
+ maxsize: size in bytes
diff --git a/doc/examples/cloud-config-phone-home.txt b/doc/examples/cloud-config-phone-home.txt
index 7f2b69f7..b30c14e3 100644
--- a/doc/examples/cloud-config-phone-home.txt
+++ b/doc/examples/cloud-config-phone-home.txt
@@ -5,10 +5,10 @@
# url
# default: none
# phone_home:
-# url: http://my.foo.bar/$INSTANCE/
-# post: all
-# tries: 10
+# url: http://my.foo.bar/$INSTANCE/
+# post: all
+# tries: 10
#
phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
+ url: http://my.example.com/$INSTANCE_ID/
+ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
diff --git a/doc/examples/cloud-config-power-state.txt b/doc/examples/cloud-config-power-state.txt
index b470153d..9cd56814 100644
--- a/doc/examples/cloud-config-power-state.txt
+++ b/doc/examples/cloud-config-power-state.txt
@@ -33,8 +33,8 @@
# for future use.
#
power_state:
- delay: "+30"
- mode: poweroff
- message: Bye Bye
- timeout: 30
- condition: True
+ delay: "+30"
+ mode: poweroff
+ message: Bye Bye
+ timeout: 30
+ condition: True
diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt
index cd3c2f8e..3c7e2da7 100644
--- a/doc/examples/cloud-config-puppet.txt
+++ b/doc/examples/cloud-config-puppet.txt
@@ -5,47 +5,47 @@
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
puppet:
- # Every key present in the conf object will be added to puppet.conf:
- # [name]
- # subkey=value
- #
- # For example the configuration below will have the following section
- # added to puppet.conf:
- # [puppetd]
- # server=puppetmaster.example.org
- # certname=i-0123456.ip-X-Y-Z.cloud.internal
- #
- # The puppmaster ca certificate will be available in
- # /var/lib/puppet/ssl/certs/ca.pem
- conf:
- agent:
- server: "puppetmaster.example.org"
- # certname supports substitutions at runtime:
- # %i: instanceid
- # Example: i-0123456
- # %f: fqdn of the machine
- # Example: ip-X-Y-Z.cloud.internal
- #
- # NB: the certname will automatically be lowercased as required by puppet
- certname: "%i.%f"
- # ca_cert is a special case. It won't be added to puppet.conf.
- # It holds the puppetmaster certificate in pem format.
- # It should be a multi-line string (using the | yaml notation for
- # multi-line strings).
- # The puppetmaster certificate is located in
- # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
- #
- ca_cert: |
- -----BEGIN CERTIFICATE-----
- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
- Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
- MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
- b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
- 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
- qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
- T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
- BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
- SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
- +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
- hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
- -----END CERTIFICATE-----
+ # Every key present in the conf object will be added to puppet.conf:
+ # [name]
+ # subkey=value
+ #
+ # For example the configuration below will have the following section
+ # added to puppet.conf:
+ # [puppetd]
+ # server=puppetmaster.example.org
+ # certname=i-0123456.ip-X-Y-Z.cloud.internal
+ #
+ # The puppmaster ca certificate will be available in
+ # /var/lib/puppet/ssl/certs/ca.pem
+ conf:
+ agent:
+ server: "puppetmaster.example.org"
+ # certname supports substitutions at runtime:
+ # %i: instanceid
+ # Example: i-0123456
+ # %f: fqdn of the machine
+ # Example: ip-X-Y-Z.cloud.internal
+ #
+ # NB: the certname will automatically be lowercased as required by puppet
+ certname: "%i.%f"
+ # ca_cert is a special case. It won't be added to puppet.conf.
+ # It holds the puppetmaster certificate in pem format.
+ # It should be a multi-line string (using the | yaml notation for
+ # multi-line strings).
+ # The puppetmaster certificate is located in
+ # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
+ #
+ ca_cert: |
+ -----BEGIN CERTIFICATE-----
+ MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
+ Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
+ MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
+ b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
+ 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
+ qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
+ T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
+ BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
+ SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
+ +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
+ hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
+ -----END CERTIFICATE-----
diff --git a/doc/examples/cloud-config-reporting.txt b/doc/examples/cloud-config-reporting.txt
index ee00078f..80bde303 100644
--- a/doc/examples/cloud-config-reporting.txt
+++ b/doc/examples/cloud-config-reporting.txt
@@ -4,14 +4,14 @@
## A 'webhook' and a 'log' type.
## It also disables the built in default 'log'
reporting:
- smtest:
- type: webhook
- endpoint: "http://myhost:8000/"
- consumer_key: "ckey_foo"
- consumer_secret: "csecret_foo"
- token_key: "tkey_foo"
- token_secret: "tkey_foo"
- smlogger:
- type: log
- level: WARN
- log: null
+ smtest:
+ type: webhook
+ endpoint: "http://myhost:8000/"
+ consumer_key: "ckey_foo"
+ consumer_secret: "csecret_foo"
+ token_key: "tkey_foo"
+ token_secret: "tkey_foo"
+ smlogger:
+ type: log
+ level: WARN
+ log: null
diff --git a/doc/examples/cloud-config-rh_subscription.txt b/doc/examples/cloud-config-rh_subscription.txt
index be121338..5cc903a2 100644
--- a/doc/examples/cloud-config-rh_subscription.txt
+++ b/doc/examples/cloud-config-rh_subscription.txt
@@ -14,36 +14,36 @@
# /etc/rhsm/rhs.conf file
rh_subscription:
- username: joe@foo.bar
+ username: joe@foo.bar
- ## Quote your password if it has symbols to be safe
- password: '1234abcd'
+ ## Quote your password if it has symbols to be safe
+ password: '1234abcd'
- ## If you prefer, you can use the activation key and
- ## org instead of username and password. Be sure to
- ## comment out username and password
+ ## If you prefer, you can use the activation key and
+ ## org instead of username and password. Be sure to
+ ## comment out username and password
- #activation-key: foobar
- #org: 12345
+ #activation-key: foobar
+ #org: 12345
- ## Uncomment to auto-attach subscriptions to your system
- #auto-attach: True
+ ## Uncomment to auto-attach subscriptions to your system
+ #auto-attach: True
- ## Uncomment to set the service level for your
- ## subscriptions
- #service-level: self-support
+ ## Uncomment to set the service level for your
+ ## subscriptions
+ #service-level: self-support
- ## Uncomment to add pools (needs to be a list of IDs)
- #add-pool: []
+ ## Uncomment to add pools (needs to be a list of IDs)
+ #add-pool: []
- ## Uncomment to add or remove yum repos
- ## (needs to be a list of repo IDs)
- #enable-repo: []
- #disable-repo: []
+ ## Uncomment to add or remove yum repos
+ ## (needs to be a list of repo IDs)
+ #enable-repo: []
+ #disable-repo: []
- ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf
- #rhsm-baseurl: http://url
+ ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf
+ #rhsm-baseurl: http://url
- ## Uncomment to alter the server hostname in
- ## /etc/rhsm/rhsm.conf
- #server-hostname: foo.bar.com
+ ## Uncomment to alter the server hostname in
+ ## /etc/rhsm/rhsm.conf
+ #server-hostname: foo.bar.com
diff --git a/doc/examples/cloud-config-rsyslog.txt b/doc/examples/cloud-config-rsyslog.txt
index 28ea1f16..d28dd38e 100644
--- a/doc/examples/cloud-config-rsyslog.txt
+++ b/doc/examples/cloud-config-rsyslog.txt
@@ -1,3 +1,4 @@
+#cloud-config
## the rsyslog module allows you to configure the systems syslog.
## configuration of syslog is under the top level cloud-config
## entry 'rsyslog'.
@@ -5,22 +6,22 @@
## Example:
#cloud-config
rsyslog:
- remotes:
- # udp to host 'maas.mydomain' port 514
- maashost: maas.mydomain
- # udp to ipv4 host on port 514
- maas: "@[10.5.1.56]:514"
- # tcp to host ipv6 host on port 555
- maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
- configs:
- - "*.* @@192.158.1.1"
- - content: "*.* @@192.0.2.1:10514"
- filename: 01-example.conf
- - content: |
- *.* @@syslogd.example.com
- config_dir: /etc/rsyslog.d
- config_filename: 20-cloud-config.conf
- service_reload_command: [your, syslog, reload, command]
+ remotes:
+ # udp to host 'maas.mydomain' port 514
+ maashost: maas.mydomain
+ # udp to ipv4 host on port 514
+ maas: "@[10.5.1.56]:514"
+ # tcp to host ipv6 host on port 555
+ maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
+ configs:
+ - "*.* @@192.158.1.1"
+ - content: "*.* @@192.0.2.1:10514"
+ filename: 01-example.conf
+ - content: |
+ *.* @@syslogd.example.com
+ config_dir: /etc/rsyslog.d
+ config_filename: 20-cloud-config.conf
+ service_reload_command: [your, syslog, reload, command]
## Additionally the following legacy format is supported
## it is converted into the format above before use.
@@ -28,11 +29,11 @@ rsyslog:
## rsyslog_dir -> rsyslog/config_dir
## rsyslog -> rsyslog/configs
# rsyslog:
-# - "*.* @@192.158.1.1"
-# - content: "*.* @@192.0.2.1:10514"
-# filename: 01-example.conf
-# - content: |
-# *.* @@syslogd.example.com
+# - "*.* @@192.158.1.1"
+# - content: "*.* @@192.0.2.1:10514"
+# filename: 01-example.conf
+# - content: |
+# *.* @@syslogd.example.com
# rsyslog_filename: 20-cloud-config.conf
# rsyslog_dir: /etc/rsyslog.d
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index f588bfbc..b593cdd1 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -7,6 +7,11 @@ groups:
- cloud-users
# Add users to the system. Users are added after groups are added.
+# Note: Most of these configuration options will not be honored if the user
+# already exists. Following options are the exceptions and they are
+# applicable on already-existing users:
+# - 'plain_text_passwd', 'hashed_passwd', 'lock_passwd', 'sudo',
+# 'ssh_authorized_keys', 'ssh_redirect_user'.
users:
- default
- name: foobar
@@ -84,7 +89,7 @@ users:
# use <default_username> instead. This option only disables cloud
# provided public-keys. An error will be raised if ssh_authorized_keys
# or ssh_import_id is provided for the same user.
-#
+#
# ssh_authorized_keys.
# sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule
# strings or False to explicitly deny sudo usage. Examples:
@@ -120,10 +125,10 @@ users:
# to have the 'ubuntu' user in addition to other users, you need to instruct
# cloud-init that you also want the default user. To do this use the following
# syntax:
-# users:
-# - default
-# - bob
-# - ....
+# users:
+# - default
+# - bob
+# - ....
# foobar: ...
#
# users[0] (the first user in users) overrides the user directive.
@@ -131,10 +136,10 @@ users:
# The 'default' user above references the distro's config:
# system_info:
# default_user:
-# name: Ubuntu
-# plain_text_passwd: 'ubuntu'
-# home: /home/ubuntu
-# shell: /bin/bash
-# lock_passwd: True
-# gecos: Ubuntu
-# groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev]
+# name: Ubuntu
+# plain_text_passwd: 'ubuntu'
+# home: /home/ubuntu
+# shell: /bin/bash
+# lock_passwd: True
+# gecos: Ubuntu
+# groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev]
diff --git a/doc/examples/cloud-config-vendor-data.txt b/doc/examples/cloud-config-vendor-data.txt
index 7f90847b..920d12e8 100644
--- a/doc/examples/cloud-config-vendor-data.txt
+++ b/doc/examples/cloud-config-vendor-data.txt
@@ -7,8 +7,8 @@
# vendordata. Users of the end system are given ultimate control.
#
vendor_data:
- enabled: True
- prefix: /usr/bin/ltrace
+ enabled: True
+ prefix: /usr/bin/ltrace
# enabled: whether it is enabled or not
# prefix: the command to run before any vendor scripts.
diff --git a/doc/examples/cloud-config-write-files.txt b/doc/examples/cloud-config-write-files.txt
index ec98bc93..6c67c503 100644
--- a/doc/examples/cloud-config-write-files.txt
+++ b/doc/examples/cloud-config-write-files.txt
@@ -8,26 +8,26 @@
#
# Note: Content strings here are truncated for example purposes.
write_files:
-- encoding: b64
- content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
- owner: root:root
- path: /etc/sysconfig/selinux
- permissions: '0644'
-- content: |
- # My new /etc/sysconfig/samba file
+- encoding: b64
+ content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
+ owner: root:root
+ path: /etc/sysconfig/selinux
+ permissions: '0644'
+- content: |
+ # My new /etc/sysconfig/samba file
- SMBDOPTIONS="-D"
- path: /etc/sysconfig/samba
-- content: !!binary |
- f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
- AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
- AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
- ....
- path: /bin/arch
- permissions: '0555'
-- encoding: gzip
- content: !!binary |
- H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
- path: /usr/bin/hello
- permissions: '0755'
+ SMBDOPTIONS="-D"
+ path: /etc/sysconfig/samba
+- content: !!binary |
+ f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
+ AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
+ AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
+ ....
+ path: /bin/arch
+ permissions: '0555'
+- encoding: gzip
+ content: !!binary |
+ H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
+ path: /usr/bin/hello
+ permissions: '0755'
diff --git a/doc/examples/cloud-config-yum-repo.txt b/doc/examples/cloud-config-yum-repo.txt
index ab2c031e..e8f2bbb4 100644
--- a/doc/examples/cloud-config-yum-repo.txt
+++ b/doc/examples/cloud-config-yum-repo.txt
@@ -6,15 +6,15 @@
# The following example adds the file /etc/yum.repos.d/epel_testing.repo
# which can then subsequently be used by yum for later operations.
yum_repos:
- # The name of the repository
- epel-testing:
- # Any repository configuration options
- # See: man yum.conf
- #
- # This one is required!
- baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch
- enabled: false
- failovermethod: priority
- gpgcheck: true
- gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
- name: Extra Packages for Enterprise Linux 5 - Testing
+ # The name of the repository
+ epel-testing:
+ # Any repository configuration options
+ # See: man yum.conf
+ #
+ # This one is required!
+ baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch
+ enabled: false
+ failovermethod: priority
+ gpgcheck: true
+ gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL
+ name: Extra Packages for Enterprise Linux 5 - Testing
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index eb84dcf5..f3ae5e68 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -192,8 +192,8 @@ cloud_config_modules:
# ssh_import_id: [ user1, user2 ]
# ssh_import_id will feed the list in that variable to
-# ssh-import-id, so that public keys stored in launchpad
-# can easily be imported into the configured user
+# ssh-import-id, so that public keys stored in launchpad
+# can easily be imported into the configured user
# This can be a single string ('smoser') or a list ([smoser, kirkland])
ssh_import_id: [smoser]
@@ -202,14 +202,15 @@ ssh_import_id: [smoser]
# See debconf-set-selections man page.
#
# Default: none
-#
-debconf_selections: | # Need to preserve newlines
- # Force debconf priority to critical.
- debconf debconf/priority select critical
+#
+debconf_selections:
+ # Force debconf priority to critical.
+ set1: debconf debconf/priority select critical
- # Override default frontend to readline, but allow user to select.
- debconf debconf/frontend select readline
- debconf debconf/frontend seen false
+ # Override default frontend to readline, but allow user to select.
+ set2: |
+ debconf debconf/frontend select readline
+ debconf debconf/frontend seen false
# manage byobu defaults
# byobu_by_default:
@@ -235,7 +236,7 @@ disable_root: false
# The string '$USER' will be replaced with the username of the default user.
# The string '$DISABLE_USER' will be replaced with the username to disable.
#
-# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10"
+# disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10;exit 142"
# disable ssh access for non-root-users
# To disable ssh access for non-root users, ssh_redirect_user: true can be
@@ -375,11 +376,11 @@ final_message: "The system is finally up, after $UPTIME seconds"
# the special entry "&1" for an error means "same location as stdout"
# (Note, that '&1' has meaning in yaml, so it must be quoted)
output:
- init: "> /var/log/my-cloud-init.log"
- config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ]
- final:
- output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout"
- error: "&1"
+ init: "> /var/log/my-cloud-init.log"
+ config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ]
+ final:
+ output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout"
+ error: "&1"
# phone_home: if this dictionary is present, then the phone_home
@@ -392,8 +393,8 @@ output:
# tries: 10
#
phone_home:
- url: http://my.example.com/$INSTANCE_ID/
- post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
+ url: http://my.example.com/$INSTANCE_ID/
+ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
# timezone: set the timezone for this instance
# the value of 'timezone' must exist in /usr/share/zoneinfo
@@ -407,7 +408,7 @@ timezone: US/Eastern
# then 'L' will be initially created with root:root ownership (during
# cloud-init), and then at cloud-config time (when syslog is available)
# the syslog daemon will be unable to write to the file.
-#
+#
# to remedy this situation, 'def_log_file' can be set to a filename
# and syslog_fix_perms to a string containing "<user>:<group>"
# if syslog_fix_perms is a list, it will iterate through and use the
@@ -446,11 +447,11 @@ syslog_fix_perms: syslog:root
# to set hashed password, here account 'user3' has a password it set to
# 'cloud-init', hashed with SHA-256:
# chpasswd:
-# list: |
-# user1:password1
-# user2:RANDOM
-# user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
-# expire: True
+# list: |
+# user1:password1
+# user2:RANDOM
+# user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA
+# expire: True
# ssh_pwauth: [ True, False, "" or "unchanged" ]
#
# Hashed passwords can be generated in multiple ways, example with python3:
@@ -510,7 +511,7 @@ manual_cache_clean: False
# power_state can be used to make the system shutdown, reboot or
# halt after boot is finished. This same thing can be acheived by
# user-data scripts or by runcmd by simply invoking 'shutdown'.
-#
+#
# Doing it this way ensures that cloud-init is entirely finished with
# modules that would be executed, and avoids any error/log messages
# that may go to the console as a result of system services like
@@ -521,6 +522,6 @@ manual_cache_clean: False
# mode: required. must be one of 'poweroff', 'halt', 'reboot'
# message: provided as the message argument to 'shutdown'. default is none.
power_state:
- delay: 30
- mode: poweroff
- message: Bye Bye
+ delay: 30
+ mode: poweroff
+ message: Bye Bye
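As an illustration of the chpasswd context touched above (the surrounding file notes that hashed passwords can be generated in multiple ways with python3), a minimal Python 3 sketch; the username and plaintext below are placeholders and are not part of this change:

    import crypt  # standard library wrapper around Unix crypt(3)-style hashing

    plaintext = "cloud-init"                        # placeholder password, not from this diff
    salt = crypt.mksalt(crypt.METHOD_SHA512)        # random "$6$..." salt
    print("user3:" + crypt.crypt(plaintext, salt))  # line in the form chpasswd's "list" expects

The hash shown for user3 in the hunk above uses the SHA-256 crypt scheme ("$5$..."); crypt.METHOD_SHA256 would produce that form instead.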
diff --git a/doc/examples/kernel-cmdline.txt b/doc/examples/kernel-cmdline.txt
index f043baef..805bc3d3 100644
--- a/doc/examples/kernel-cmdline.txt
+++ b/doc/examples/kernel-cmdline.txt
@@ -3,16 +3,19 @@ configuration that comes from the kernel command line has higher priority
than configuration in /etc/cloud/cloud.cfg
The format is:
- cc: <yaml content here> [end_cc]
+ cc: <yaml content here|URL encoded yaml content> [end_cc]
cloud-config will consider any content after 'cc:' to be cloud-config
data. If an 'end_cc' string is present, then it will stop reading there.
otherwise it considers everthing after 'cc:' to be cloud-config content.
-In order to allow carriage returns, you must enter '\\n', literally,
+In order to allow carriage returns, you must enter '\\n', literally,
on the command line two backslashes followed by a letter 'n'.
+The yaml content may also be URL encoded (urllib.parse.quote()).
+
Here are some examples:
- root=/dev/sda1 cc: ssh_import_id: [smoser, kirkland]\\n
- root=LABEL=uec-rootfs cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
- cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc root=/dev/sda1
+ root=/dev/sda1 cc: ssh_import_id: [smoser, kirkland]\\n
+ root=LABEL=uec-rootfs cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
+ cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc root=/dev/sda1
+ cc:ssh_import_id: %5Bsmoser%5D end_cc cc:runcmd: %5B %5B ls, -l %5D %5D end_cc root=/dev/sda1
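A sketch of producing the URL-encoded form with urllib.parse.quote(), assuming
Python 3 is available; here only the YAML value is quoted::

    $ python3 -c "import urllib.parse; print(urllib.parse.quote('[smoser, kirkland]'))"
    %5Bsmoser%2C%20kirkland%5D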
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 86441986..684822c2 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -18,7 +18,7 @@ from cloudinit.config.schema import get_schema_doc
# General information about the project.
project = 'cloud-init'
-copyright = '2019, Canonical Ltd.'
+copyright = '2020, Canonical Ltd.'
# -- General configuration ----------------------------------------------------
diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst
index 5d90c131..0015e35a 100644
--- a/doc/rtd/index.rst
+++ b/doc/rtd/index.rst
@@ -68,6 +68,7 @@ Having trouble? We would like to help!
:caption: Development
topics/hacking.rst
+ topics/code_review.rst
topics/security.rst
topics/debugging.rst
topics/logging.rst
diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst
index 3f215b1b..8f56a7d2 100644
--- a/doc/rtd/topics/availability.rst
+++ b/doc/rtd/topics/availability.rst
@@ -14,17 +14,20 @@ distributions and clouds, both public and private.
Distributions
=============
-Cloud-init has support across all major Linux distributions and
-FreeBSD:
+Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD
+and OpenBSD:
-- Ubuntu
-- SLES/openSUSE
-- RHEL/CentOS
-- Fedora
-- Gentoo Linux
-- Debian
+- Alpine Linux
- ArchLinux
+- Debian
+- Fedora
- FreeBSD
+- Gentoo Linux
+- NetBSD
+- OpenBSD
+- RHEL/CentOS
+- SLES/openSUSE
+- Ubuntu
Clouds
======
diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst
index d846867b..4e79c958 100644
--- a/doc/rtd/topics/boot.rst
+++ b/doc/rtd/topics/boot.rst
@@ -129,7 +129,7 @@ Config
+---------+--------+----------------------------------------------------------+
This stage runs config modules only. Modules that do not really have an
-effect on other stages of boot are run here.
+effect on other stages of boot are run here, including ``runcmd``.
Final
=====
@@ -150,7 +150,7 @@ Things that run here include
* package installations
* configuration management plugins (puppet, chef, salt-minion)
- * user-scripts (including ``runcmd``).
+ * user-scripts (i.e. shell scripts passed as user-data)
For scripts external to cloud-init looking to wait until cloud-init is
finished, the ``cloud-init status`` subcommand can help block external
diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst
index b32677b0..0ff230b5 100644
--- a/doc/rtd/topics/cli.rst
+++ b/doc/rtd/topics/cli.rst
@@ -106,17 +106,19 @@ Do **NOT** rely on the output of these commands as they can and will change.
Current subcommands:
- * ``schema``: a **#cloud-config** format and schema
- validator. It accepts a cloud-config yaml file and annotates potential
- schema errors locally without the need for deployment. Schema
- validation is work in progress and supports a subset of cloud-config
- modules.
-
+ * ``net-convert``: manually use cloud-init's network format conversion, useful
+ for testing configuration or testing changes to the network conversion logic
+ itself.
* ``render``: use cloud-init's jinja template render to
process **#cloud-config** or **custom-scripts**, injecting any variables
from ``/run/cloud-init/instance-data.json``. It accepts a user-data file
containing the jinja template header ``## template: jinja`` and renders
that content with any instance-data.json variables present.
+ * ``schema``: a **#cloud-config** format and schema
+ validator. It accepts a cloud-config yaml file and annotates potential
+ schema errors locally without the need for deployment. Schema
+ validation is work in progress and supports a subset of cloud-config
+ modules.
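For example, a local cloud-config file can be checked before deployment
(a sketch; ``my-config.yaml`` is a hypothetical file name)::

    % cloud-init devel schema --config-file my-config.yaml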
.. _cli_features:
@@ -162,7 +164,7 @@ declared to run in various boot stages in the file
* *cloud_init_modules*
* *cloud_config_modules*
-* *cloud_init_modules*
+* *cloud_final_modules*
Can be run on the command line, but each module is gated to run only once due
to semaphores in ``/var/lib/cloud/``.
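For example, the config-stage modules can be re-invoked manually (a sketch;
the semaphores mentioned above may prevent modules from actually re-running
unless the instance state is cleaned first)::

    % sudo cloud-init modules --mode=config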
diff --git a/doc/rtd/topics/code_review.rst b/doc/rtd/topics/code_review.rst
new file mode 100644
index 00000000..68c10405
--- /dev/null
+++ b/doc/rtd/topics/code_review.rst
@@ -0,0 +1,256 @@
+*******************
+Code Review Process
+*******************
+
+In order to manage incoming pull requests effectively, and to provide
+timely feedback and/or acceptance, this document serves as a guideline
+for the review process and outlines the expectations for those
+submitting code to the project as well as those reviewing the code.
+Code is reviewed for acceptance by at least one core team member (later
+referred to as committers), but comments and suggestions from others
+are encouraged and welcome.
+
+The process is intended to provide timely and actionable feedback for
+any submission.
+
+Asking For Help
+===============
+
+cloud-init contributors, potential contributors, community members and
+users are encouraged to ask for any help that they need. If you have
+questions about the code review process, or at any point during the
+code review process, these are the available avenues:
+
+* if you have an open Pull Request, comment on that pull request
+* join the ``#cloud-init`` channel on the Freenode IRC network and ask
+ away
+* send an email to the cloud-init mailing list,
+ cloud-init@lists.launchpad.net
+
+These are listed in rough order of preference, but use whichever of
+them you are most comfortable with.
+
+Goals
+=====
+
+This process has the following goals:
+
+* Ensure code reviews occur in a timely fashion and provide actionable
+ feedback if changes are desired.
+* Ensure the minimization of ancillary problems to increase the
+ efficiency for those reviewing the submitted code
+
+Role Definitions
+================
+
+Any code review process will have (at least) two involved parties. For
+our purposes, these parties are referred to as **Proposer** and
+**Reviewer**. (We also have the **Committer** role which is a special
+case of the **Reviewer** role.) The terms are defined here (and the
+use of the singular form is not meant to imply that they refer to a
+single person):
+
+Proposer
+ The person proposing a pull request (hereafter known as a PR).
+
+Reviewer
+ A person who is reviewing a PR.
+
+Committer
+ A cloud-init core developer (i.e. a person who has permission to
+ merge PRs into master).
+
+Prerequisites For Landing Pull Requests
+=======================================
+
+Before a PR can be landed into master, the following conditions *must*
+be met:
+
+* the CLA has been signed by the **Proposer** (or is covered by an
+ entity-level CLA signature)
+* all required status checks are passing
+* at least one "Approve" review from a **Committer**
+* no "Request changes" reviews from any **Committer**
+
+The following conditions *should* be met:
+
+* any Python functions/methods/classes have docstrings added/updated
+* any changes to config module behaviour are captured in the
+ documentation of the config module
+* any Python code added has corresponding unit tests
+* no "Request changes" reviews from any **Reviewer**
+
+These conditions can be relaxed at the discretion of the
+**Committers** on a case-by-case basis. Generally, for accountability,
+this should not be the decision of a single **Committer**, and the
+decision should be documented in comments on the PR.
+
+(To take a specific example, the ``cc_phone_home`` module had no tests
+at the time `PR #237
+<https://github.com/canonical/cloud-init/pull/237>`_ was submitted, so
+the **Proposer** was not expected to write a full set of tests for
+their minor modification, but they were expected to update the config
+module docs.)
+
+Non-Committer Reviews
+=====================
+
+Reviews from non-**Committers** are *always* welcome. Please feel
+empowered to review PRs and leave your thoughts and comments on any
+submitted PRs, regardless of the **Proposer**.
+
+Much of the below process is written in terms of the **Committers**.
+This is not intended to reflect that reviews should only come from that
+group, but acknowledges that we are ultimately responsible for
+maintaining the standards of the codebase. It would be entirely
+reasonable (and very welcome) for a **Reviewer** to only examine part
+of a PR, but it would not be appropriate for a **Committer** to merge a
+PR without full scrutiny.
+
+Opening Phase
+=============
+
+In this phase, the **Proposer** is responsible for opening a pull
+request and meeting the prerequisites laid out above.
+
+If they need help understanding the prerequisites, or help meeting the
+prerequisites, then they can (and should!) ask for help. See the
+:ref:`Asking For Help` section above for the ways to do that.
+
+These are the steps that comprise the opening phase:
+
+1. The **Proposer** opens PR
+
+2. CI runs automatically, and if
+
+ CI fails
+ The **Proposer** is expected to fix CI failures. If the
+ **Proposer** doesn't understand the nature of the failures they
+ are seeing, they should comment in the PR to request assistance,
+ or use another way of :ref:`Asking For Help`.
+
+ (Note that if assistance is not requested, the **Committers**
+ will assume that the **Proposer** is working on addressing the
+ failures themselves. If you require assistance, please do ask
+ for help!)
+
+ CI passes
+ Move on to the :ref:`Review phase`.
+
+Review Phase
+============
+
+In this phase, the **Proposer** and the **Reviewers** will iterate
+together to, hopefully, get the PR merged into the cloud-init codebase.
+There are three potential outcomes: merged, rejected permanently, and
+temporarily closed. (The first two are covered in this section; see
+:ref:`Inactive Pull Requests` for details about temporary closure.)
+
+(In the below, when the verbs "merge" or "squash merge" are used, they
+should be understood to mean "squash merged using the GitHub UI", which
+is the only way that changes can land in cloud-init's master branch.)
+
+These are the steps that comprise the review phase:
+
+1. **The Committers** assign a **Committer** to the PR
+
+ This **Committer** is expected to shepherd the PR to completion (and
+ merge it, if that is the outcome reached). This means that they
+ will perform an initial review, and monitor the PR to ensure that
+ the **Proposer** is receiving any assistance that they require. The
+ **Committers** will perform this assignment on a daily basis.
+
+ This assignment is intended to ensure that the **Proposer** has a
+ clear point of contact with a cloud-init core developer, and that
+ they get timely feedback after submitting a PR. It *is not*
+ intended to preclude reviews from any other **Reviewers**, nor to
+ imply that the **Committer** has ownership over the review process.
+
+ The assigned **Committer** may choose to delegate the code review of
+ a PR to another **Reviewer** if they think that they would be better
+ suited.
+
+ (Note that, in GitHub terms, this is setting an Assignee, not
+ requesting a review.)
+
+2. That **Committer** performs an initial review of the PR, resulting
+ in one of the following:
+
+ Approve
+ If the submitted PR meets all of the :ref:`Prerequisites for
+ Landing Pull Requests` and passes code review, then the
+ **Committer** will squash merge immediately.
+
+ There may be circumstances where a PR should not be merged
+ immediately. The ``wip`` label will be applied to PRs for which
+ this is true. Only **Committers** are able to apply labels to
+ PRs, so anyone who believes that this label should be applied to a
+ PR should request its application in a comment on the PR.
+
+ The review process is **DONE**.
+
+ Approve (with nits)
+ If the **Proposer** submits their PR with "Allow edits from
+ maintainer" enabled, and the only changes the **Committer**
+ requests are minor "nits", the **Committer** can push fixes for
+ those nits and *immediately* squash merge. If the **Committer**
+ does not wish to fix these nits but believes they should block a
+ straight-up Approve, then their review should be "Needs Changes"
+ instead.
+
+ A nit is understood to be something like a minor style issue or a
+ spelling error, generally confined to a single line of code.
+
+ If a **Committer** is unsure as to whether their requested change
+ is a nit, they should not treat it as a nit.
+
+ (If a **Proposer** wants to opt-out of this, then they should
+ uncheck "Allow edits from maintainer" when submitting their PR.)
+
+ The review process is **DONE**.
+
+ Outright rejection
+ The **Committer** will close the PR, with useful messaging for the
+ **Proposer** as to why this has happened.
+
+ This is reserved for cases where the proposed change is completely
+ unfit for landing, and there is no reasonable path forward. This
+ should only be used sparingly, as there are very few cases where
+ proposals are completely unfit.
+
+ If a different approach to the same problem is planned, it should
+ be submitted as a separate PR. The **Committer** should include
+ this information in their message when the PR is closed.
+
+ The review process is **DONE**.
+
+ Needs Changes
+ The **Committer** will give the **Proposer** a clear idea of what
+ is required for an Approve vote or, for more complex PRs, what the
+ next steps towards an Approve vote are.
+
+ The **Proposer** will ask questions if they don't understand, or
+ disagree with, the **Committer**'s review comments.
+
+ Once consensus has been reached, the **Proposer** will address the
+ review comments.
+
+ Once the review comments are addressed (as well as, potentially,
+ in the interim), CI will run. If CI fails, the **Proposer** is
+ expected to fix CI failures. If CI passes, the **Proposer**
+ should indicate that the PR is ready for re-review (by @ing the
+ assigned reviewer), effectively moving back to the start of this
+ section.
+
+Inactive Pull Requests
+======================
+
+PRs will be temporarily closed if they have been waiting on
+**Proposer** action for a certain amount of time without activity. A
+PR will be marked as stale (with an explanatory comment) after 14 days
+of inactivity. It will be closed after a further 7 days of inactivity.
+
+These closes are not considered permanent, and the closing message
+should reflect this for the **Proposer**. However, if a PR is reopened,
+it should effectively enter the :ref:`Opening phase` again, as it may
+need some work done to get CI passing again.
diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index 1427fb3d..fdb919a5 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -114,19 +114,19 @@ An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- Azure:
- agent_command: __builtin__
- apply_network_config: true
- data_dir: /var/lib/waagent
- dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
- disk_aliases:
+ Azure:
+ agent_command: __builtin__
+ apply_network_config: true
+ data_dir: /var/lib/waagent
+ dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases
+ disk_aliases:
ephemeral0: /dev/disk/cloud/azure_resource
- hostname_bounce:
+ hostname_bounce:
interface: eth0
command: builtin
policy: true
hostname_command: hostname
- set_hostname: true
+ set_hostname: true
Userdata
diff --git a/doc/rtd/topics/datasources/cloudstack.rst b/doc/rtd/topics/datasources/cloudstack.rst
index da183226..a24de34f 100644
--- a/doc/rtd/topics/datasources/cloudstack.rst
+++ b/doc/rtd/topics/datasources/cloudstack.rst
@@ -9,14 +9,20 @@ dhcp lease information given to the instance.
For more details on meta-data and user-data,
refer the `CloudStack Administrator Guide`_.
-URLs to access user-data and meta-data from the Virtual Machine. Here 10.1.1.1
-is the Virtual Router IP:
+The following URLs provide access to user-data and meta-data from the Virtual Machine.
+`data-server.` is a well-known hostname provided by the CloudStack virtual
+router that points to the next UserData server (which is usually also
+the virtual router).
.. code:: bash
- http://10.1.1.1/latest/user-data
- http://10.1.1.1/latest/meta-data
- http://10.1.1.1/latest/meta-data/{metadata type}
+ http://data-server./latest/user-data
+ http://data-server./latest/meta-data
+ http://data-server./latest/meta-data/{metadata type}
+
+If `data-server.` cannot be resolved, cloud-init will try to obtain the
+virtual router's address from the system's DHCP leases. If that fails,
+it will use the system's default gateway.
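From inside the VM the data can be fetched with any HTTP client, for example
(a sketch, assuming ``curl`` is installed in the guest)::

    $ curl http://data-server./latest/user-data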
Configuration
-------------
@@ -37,11 +43,11 @@ An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- CloudStack:
- max_wait: 120
- timeout: 50
- datasource_list:
- - CloudStack
+ CloudStack:
+ max_wait: 120
+ timeout: 50
+ datasource_list:
+ - CloudStack
.. _Apache CloudStack: http://cloudstack.apache.org/
diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst
index a90f3779..274ca1e4 100644
--- a/doc/rtd/topics/datasources/ec2.rst
+++ b/doc/rtd/topics/datasources/ec2.rst
@@ -42,6 +42,7 @@ Note that there are multiple versions of this data provided, cloud-init
by default uses **2009-04-04** but newer versions can be supported with
relative ease (newer versions have more data exposed, while maintaining
backward compatibility with the previous versions).
+Version **2016-09-02** is required for secondary IP address support.
To see which versions are supported from your cloud provider use the following
URL:
@@ -80,16 +81,26 @@ The settings that may be configured are:
* **timeout**: the timeout value provided to urlopen for each individual http
request. This is used both when selecting a metadata_url and when crawling
the metadata service. (default: 50)
+ * **apply_full_imds_network_config**: Boolean (default: True) to allow
+   cloud-init to configure any secondary NICs and secondary IPs described by
+   the metadata service. All network interfaces are configured with DHCP (v4)
+   to obtain a primary IPv4 address and route. Interfaces which have a
+   non-empty 'ipv6s' list will also enable DHCPv6 to obtain a primary IPv6
+   address and route. The DHCP response (v4 and v6) returns an IP that matches
+   the first element of the local-ipv4s and ipv6s lists respectively. All
+   additional values (secondary addresses) in the static IP lists will be
+   added to the interface.
An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- Ec2:
- metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
- max_wait: 120
- timeout: 50
+ Ec2:
+ metadata_urls: ["http://169.254.169.254:80", "http://instance-data:8773"]
+ max_wait: 120
+ timeout: 50
+ apply_full_imds_network_config: true
Notes
-----
@@ -102,4 +113,12 @@ Notes
The check for the instance type is performed by is_classic_instance()
method.
+ * For EC2 instances with multiple network interfaces (NICs) attached, dhcp4
+ will be enabled to obtain the primary private IPv4 address of those NICs.
+ Wherever dhcp4 or dhcp6 is enabled for a NIC, a dhcp route-metric will be
+ added with the value of ``<device-number + 1> * 100`` to ensure dhcp
+ routes on the primary NIC are preferred to any secondary NICs.
+ For example: the primary NIC will have a DHCP route-metric of 100,
+ the next NIC will be 200.
+
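A rough sketch (not cloud-init's literal output; interface names are
illustrative) of the resulting network config v2 for a primary and one
secondary NIC::

    version: 2
    ethernets:
      eth0:
        dhcp4: true
        dhcp4-overrides:
          route-metric: 100
      eth1:
        dhcp4: true
        dhcp4-overrides:
          route-metric: 200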
.. vi: textwidth=78
diff --git a/doc/rtd/topics/datasources/maas.rst b/doc/rtd/topics/datasources/maas.rst
index 85c853e9..427fba24 100644
--- a/doc/rtd/topics/datasources/maas.rst
+++ b/doc/rtd/topics/datasources/maas.rst
@@ -5,6 +5,6 @@ MAAS
*TODO*
-For now see: http://maas.ubuntu.com/
+For now see: https://maas.io/docs
diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst
index bc96f7fe..0ca79102 100644
--- a/doc/rtd/topics/datasources/nocloud.rst
+++ b/doc/rtd/topics/datasources/nocloud.rst
@@ -32,7 +32,7 @@ The permitted keys are:
With ``ds=nocloud``, the ``seedfrom`` value must start with ``/`` or
``file://``. With ``ds=nocloud-net``, the ``seedfrom`` value must start
-with ``http://``, ``https://`` or ``ftp://``
+with ``http://`` or ``https://``.
e.g. you can pass this option to QEMU:
@@ -53,6 +53,12 @@ These user-data and meta-data files are expected to be in the following format.
Basically, user-data is simply user-data and meta-data is a yaml formatted file
representing what you'd find in the EC2 metadata service.
+You may also optionally provide a vendor-data file in the following format.
+
+::
+
+ /vendor-data
+
Given a disk ubuntu 12.04 cloud image in 'disk.img', you can create a
sufficient disk by following the example below.
@@ -133,12 +139,12 @@ be network configuration based on the filename.
version: 2
ethernets:
interface0:
- match:
- mac_address: "52:54:00:12:34:00"
- set-name: interface0
- addresses:
- - 192.168.1.10/255.255.255.0
- gateway4: 192.168.1.254
+ match:
+ mac_address: "52:54:00:12:34:00"
+ set-name: interface0
+ addresses:
+ - 192.168.1.10/255.255.255.0
+ gateway4: 192.168.1.254
.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660
diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst
index 8ce2a53d..b23b4b7c 100644
--- a/doc/rtd/topics/datasources/openstack.rst
+++ b/doc/rtd/topics/datasources/openstack.rst
@@ -5,7 +5,7 @@ OpenStack
This datasource supports reading data from the
`OpenStack Metadata Service
-<https://docs.openstack.org/nova/latest/admin/networking-nova.html#metadata-service>`_.
+<https://docs.openstack.org/nova/latest/admin/metadata-service.html>`_.
Discovery
-------------
@@ -19,7 +19,8 @@ checks the following environment attributes as a potential OpenStack platform:
* **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova*
* **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute*
- * **DMI chassis_asset_tag** is *OpenTelekomCloud*
+ * **DMI chassis_asset_tag** is *OpenTelekomCloud*, *SAP CCloud VM*,
+ *OpenStack Nova* (since 19.2) or *OpenStack Compute* (since 19.2)
Configuration
@@ -50,12 +51,12 @@ An example configuration with the default values is provided below:
.. sourcecode:: yaml
datasource:
- OpenStack:
- metadata_urls: ["http://169.254.169.254"]
- max_wait: -1
- timeout: 10
- retries: 5
- apply_network_config: True
+ OpenStack:
+ metadata_urls: ["http://169.254.169.254"]
+ max_wait: -1
+ timeout: 10
+ retries: 5
+ apply_network_config: True
Vendor Data
diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
index c312617f..6256e624 100644
--- a/doc/rtd/topics/datasources/ovf.rst
+++ b/doc/rtd/topics/datasources/ovf.rst
@@ -11,4 +11,23 @@ transport.
For further information see a full working example in cloud-init's
source code tree in doc/sources/ovf
+Configuration
+-------------
+On VMware platforms, VMTools is required for OVF datasource configuration
+settings as well as for vCloud and vSphere admin configuration. Users can
+change the VMTools configuration options with the command::
+
+    vmware-toolbox-cmd config set <section> <key> <value>
+
+The following VMTools configuration options affect cloud-init's behavior on a
+booted VM:
+ * [deploypkg] enable-custom-scripts
+   If this option is absent from the VMTools configuration, the custom script
+   is disabled by default for security reasons. Some VMware products may
+   change this default behavior (for example: enabled by default) via
+   customization specification settings.
+
+VMware admins can refer to the `customization config code
+<https://github.com/canonical/cloud-init/blob/master/cloudinit/sources/helpers/vmware/imc/config.py>`_
+when setting the customization specification settings.
+
+For more information, see the `VMware vSphere Product Documentation
+<https://docs.vmware.com/en/VMware-vSphere/7.0/com.vmware.vsphere.vm_admin.doc/GUID-9A5093A5-C54F-4502-941B-3F9C0F573A39.html>`_
+and the specific VMTools parameters consumed.
+
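For example, to explicitly enable custom scripts using the section and key
named above (a sketch)::

    vmware-toolbox-cmd config set deploypkg enable-custom-scripts true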
.. vi: textwidth=78
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index 98c0cfaa..aa1be142 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -104,6 +104,23 @@ The force parameter allows the command to be run again since the instance has
already launched. The other options increase the verbosity of logging and
put the logs to STDERR.
+How can I re-run datasource detection and cloud-init?
+=====================================================
+
+If a user is developing a new datasource or working on debugging an issue, it
+may be useful to re-run datasource detection and the initial setup of
+cloud-init.
+
+To do this, force ds-identify to re-run, clean up any logs, and re-run
+cloud-init:
+
+.. code-block:: shell-session
+
+ $ sudo DI_LOG=stderr /usr/lib/cloud-init/ds-identify --force
+ $ sudo cloud-init clean --logs
+ $ sudo cloud-init init --local
+ $ sudo cloud-init init
+
How can I debug my user data?
=============================
@@ -206,8 +223,8 @@ values or the LXD `Custom Network Configuration`_ document for more about
custom network config.
.. _LXD: https://linuxcontainers.org/
-.. _Instance Configuration: https://lxd.readthedocs.io/en/latest/instances/
-.. _Custom Network Configuration: https://lxd.readthedocs.io/en/latest/cloud-init/
+.. _Instance Configuration: https://linuxcontainers.org/lxd/docs/master/instances
+.. _Custom Network Configuration: https://linuxcontainers.org/lxd/docs/master/cloud-init
Where can I learn more?
========================================
diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst
index 2b60bdd3..d03e4caf 100644
--- a/doc/rtd/topics/format.rst
+++ b/doc/rtd/topics/format.rst
@@ -23,44 +23,35 @@ Using a mime-multi part file, the user can specify more than one type of data.
For example, both a user data script and a cloud-config type could be
specified.
-Supported content-types:
+Supported content-types are listed by the cloud-init subcommand make-mime::
-- text/cloud-boothook
-- text/cloud-config
-- text/cloud-config-archive
-- text/jinja2
-- text/part-handler
-- text/upstart-job
-- text/x-include-once-url
-- text/x-include-url
-- text/x-shellscript
+ % cloud-init devel make-mime --list-types
+ cloud-boothook
+ cloud-config
+ cloud-config-archive
+ cloud-config-jsonp
+ jinja2
+ part-handler
+ upstart-job
+ x-include-once-url
+ x-include-url
+ x-shellscript
-Helper script to generate mime messages
----------------------------------------
-.. code-block:: python
-
- #!/usr/bin/python
-
- import sys
+Helper subcommand to generate mime messages
+-------------------------------------------
- from email.mime.multipart import MIMEMultipart
- from email.mime.text import MIMEText
+The cloud-init `make-mime`_ subcommand can generate MIME multi-part files.
- if len(sys.argv) == 1:
- print("%s input-file:type ..." % (sys.argv[0]))
- sys.exit(1)
+The ``make-mime`` subcommand takes pairs of (filename, "text/" mime subtype)
+separated by a colon (e.g. ``config.yaml:cloud-config``) and emits a MIME
+multipart message to stdout. An example invocation, assuming you have your
+cloud config in ``config.yaml`` and a shell script in ``script.sh`` and want
+to store the multipart message in ``user-data``::
- combined_message = MIMEMultipart()
- for i in sys.argv[1:]:
- (filename, format_type) = i.split(":", 1)
- with open(filename) as fh:
- contents = fh.read()
- sub_message = MIMEText(contents, format_type, sys.getdefaultencoding())
- sub_message.add_header('Content-Disposition', 'attachment; filename="%s"' % (filename))
- combined_message.attach(sub_message)
+ % cloud-init devel make-mime -a config.yaml:cloud-config -a script.sh:x-shellscript > user-data
- print(combined_message)
+.. _make-mime: https://github.com/canonical/cloud-init/blob/master/cloudinit/cmd/devel/make_mime.py
User-Data Script
@@ -126,7 +117,7 @@ Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when
using a MIME archive.
.. note::
- New in cloud-init v. 18.4: Cloud config dta can also render cloud instance
+ New in cloud-init v. 18.4: Cloud config data can also render cloud instance
metadata variables using jinja templating. See
:ref:`instance_metadata` for more information.
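A minimal sketch of such a jinja-enabled cloud-config (the rendered values
come from /run/cloud-init/instance-data.json; the output path is
illustrative)::

    ## template: jinja
    #cloud-config
    runcmd:
      - echo 'cloud: {{ v1.cloud_name }} region: {{ v1.region }}' > /var/tmp/instance-info.txt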
diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst
index e7dd0d62..255245a4 100644
--- a/doc/rtd/topics/instancedata.rst
+++ b/doc/rtd/topics/instancedata.rst
@@ -76,6 +76,11 @@ There are three basic top-level keys:
'security sensitive'. Only the keys listed here will be redacted from
instance-data.json for non-root users.
+* **merged_cfg**: Merged cloud-init 'system_config' from `/etc/cloud/cloud.cfg`
+ and `/etc/cloud/cloud-cfg.d`. Values under this key could contain sensitive
+ information such as passwords, so it is included in the **sensitive-keys**
+ list which is only readable by root.
+
* **ds**: Datasource-specific metadata crawled for the specific cloud
platform. It should closely represent the structure of the cloud metadata
crawled. The structure of content and details provided are entirely
@@ -83,6 +88,9 @@ There are three basic top-level keys:
The content exposed under the 'ds' key is currently **experimental** and
expected to change slightly in the upcoming cloud-init release.
+* **sys_info**: Information about the underlying os, python, architecture and
+ kernel. This represents the data collected by `cloudinit.util.system_info`.
+
* **v1**: Standardized cloud-init metadata keys, these keys are guaranteed to
exist on all cloud platforms. They will also retain their current behavior
and format and will be carried forward even if cloud-init introduces a new
@@ -103,7 +111,7 @@ v1.cloud_name
-------------
Where possible this will indicate the 'name' of the cloud the system is running
on. This is different than the 'platform' item. For example, the cloud name of
-Amazone Web Services is 'aws', while the platform is 'ec2'.
+Amazon Web Services is 'aws', while the platform is 'ec2'.
If determining a specific name is not possible or provided in meta-data, then
this field may contain the same content as 'platform'.
@@ -117,6 +125,22 @@ Example output:
- nocloud
- ovf
+v1.distro, v1.distro_version, v1.distro_release
+-----------------------------------------------
+This shall be the distro name, version and release as determined by
+`cloudinit.util.get_linux_distro`.
+
+Example output:
+
+- alpine, 3.12.0, ''
+- centos, 7.5, core
+- debian, 9, stretch
+- freebsd, 12.0-release-p10,
+- opensuse, 42.3, x86_64
+- opensuse-tumbleweed, 20180920, x86_64
+- redhat, 7.5, 'maipo'
+- sles, 12.3, x86_64
+- ubuntu, 20.04, focal
v1.instance_id
--------------
@@ -126,6 +150,14 @@ Examples output:
- i-<hash>
+v1.kernel_release
+-----------------
+This shall be the release of the running kernel, as returned by `uname -r`.
+
+Example output:
+
+- 5.3.0-1010-aws
+
v1.local_hostname
-----------------
The internal or local hostname of the system.
@@ -135,6 +167,17 @@ Examples output:
- ip-10-41-41-70
- <user-provided-hostname>
+v1.machine
+----------
+This shall be the machine (CPU) architecture of the running system, as returned by `uname -m`.
+
+Example output:
+
+- x86_64
+- i686
+- ppc64le
+- s390x
+
v1.platform
-------------
An attempt to identify the cloud platform instance that the system is running
@@ -154,7 +197,7 @@ v1.subplatform
Additional platform details describing the specific source or type of metadata
used. The format of subplatform will be:
-``<subplatform_type> (<url_file_or_dev_path>``
+``<subplatform_type> (<url_file_or_dev_path>)``
Examples output:
@@ -171,6 +214,15 @@ Examples output:
- ['ssh-rsa AA...', ...]
+v1.python_version
+-----------------
+The version of python that is running cloud-init as determined by
+`cloudinit.util.system_info`
+
+Example output:
+
+- 3.7.6
+
v1.region
---------
The physical region/data center in which the instance is deployed.
@@ -192,164 +244,265 @@ Examples output:
Example Output
--------------
-Below is an example of ``/run/cloud-init/instance_data.json`` on an EC2
-instance:
+Below is an example of ``/run/cloud-init/instance-data-sensitive.json`` on an
+EC2 instance:
.. sourcecode:: json
{
+ "_beta_keys": [
+ "subplatform"
+ ],
+ "availability_zone": "us-east-1b",
"base64_encoded_keys": [],
+ "merged_cfg": {
+ "_doc": "Merged cloud-init system config from /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/",
+ "_log": [
+ "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n",
+ "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n",
+ "[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=(\"/dev/log\", handlers.SysLogHandler.LOG_USER)\n"
+ ],
+ "cloud_config_modules": [
+ "emit_upstart",
+ "snap",
+ "ssh-import-id",
+ "locale",
+ "set-passwords",
+ "grub-dpkg",
+ "apt-pipelining",
+ "apt-configure",
+ "ubuntu-advantage",
+ "ntp",
+ "timezone",
+ "disable-ec2-metadata",
+ "runcmd",
+ "byobu"
+ ],
+ "cloud_final_modules": [
+ "package-update-upgrade-install",
+ "fan",
+ "landscape",
+ "lxd",
+ "ubuntu-drivers",
+ "puppet",
+ "chef",
+ "mcollective",
+ "salt-minion",
+ "rightscale_userdata",
+ "scripts-vendor",
+ "scripts-per-once",
+ "scripts-per-boot",
+ "scripts-per-instance",
+ "scripts-user",
+ "ssh-authkey-fingerprints",
+ "keys-to-console",
+ "phone-home",
+ "final-message",
+ "power-state-change"
+ ],
+ "cloud_init_modules": [
+ "migrator",
+ "seed_random",
+ "bootcmd",
+ "write-files",
+ "growpart",
+ "resizefs",
+ "disk_setup",
+ "mounts",
+ "set_hostname",
+ "update_hostname",
+ "update_etc_hosts",
+ "ca-certs",
+ "rsyslog",
+ "users-groups",
+ "ssh"
+ ],
+ "datasource_list": [
+ "Ec2",
+ "None"
+ ],
+ "def_log_file": "/var/log/cloud-init.log",
+ "disable_root": true,
+ "log_cfgs": [
+ [
+ "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n",
+ "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n"
+ ]
+ ],
+ "output": {
+ "all": "| tee -a /var/log/cloud-init-output.log"
+ },
+ "preserve_hostname": false,
+ "syslog_fix_perms": [
+ "syslog:adm",
+ "root:adm",
+ "root:wheel",
+ "root:root"
+ ],
+ "users": [
+ "default"
+ ],
+ "vendor_data": {
+ "enabled": true,
+ "prefix": []
+ }
+ },
+ "cloud_name": "aws",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
"ds": {
"_doc": "EXPERIMENTAL: The structure and format of content scoped under the 'ds' key may change in subsequent releases of cloud-init.",
"_metadata_api_version": "2016-09-02",
"dynamic": {
- "instance-identity": {
+ "instance_identity": {
"document": {
- "accountId": "437526006925",
+ "accountId": "329910648901",
"architecture": "x86_64",
- "availabilityZone": "us-east-2b",
+ "availabilityZone": "us-east-1b",
"billingProducts": null,
"devpayProductCodes": null,
- "imageId": "ami-079638aae7046bdd2",
- "instanceId": "i-075f088c72ad3271c",
+ "imageId": "ami-02e8aa396f8be3b6d",
+ "instanceId": "i-0929128ff2f73a2f1",
"instanceType": "t2.micro",
"kernelId": null,
"marketplaceProductCodes": null,
- "pendingTime": "2018-10-05T20:10:43Z",
- "privateIp": "10.41.41.95",
+ "pendingTime": "2020-02-27T20:46:18Z",
+ "privateIp": "172.31.81.43",
"ramdiskId": null,
- "region": "us-east-2",
+ "region": "us-east-1",
"version": "2017-09-30"
},
"pkcs7": [
- "MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCAJIAEggHbewog",
- "ICJkZXZwYXlQcm9kdWN0Q29kZXMiIDogbnVsbCwKICAibWFya2V0cGxhY2VQcm9kdWN0Q29kZXMi",
- "IDogbnVsbCwKICAicHJpdmF0ZUlwIiA6ICIxMC40MS40MS45NSIsCiAgInZlcnNpb24iIDogIjIw",
- "MTctMDktMzAiLAogICJpbnN0YW5jZUlkIiA6ICJpLTA3NWYwODhjNzJhZDMyNzFjIiwKICAiYmls",
- "bGluZ1Byb2R1Y3RzIiA6IG51bGwsCiAgImluc3RhbmNlVHlwZSIgOiAidDIubWljcm8iLAogICJh",
- "Y2NvdW50SWQiIDogIjQzNzUyNjAwNjkyNSIsCiAgImF2YWlsYWJpbGl0eVpvbmUiIDogInVzLWVh",
- "c3QtMmIiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJyYW1kaXNrSWQiIDogbnVsbCwKICAiYXJj",
- "aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJpbWFnZUlkIiA6ICJhbWktMDc5NjM4YWFlNzA0NmJk",
- "ZDIiLAogICJwZW5kaW5nVGltZSIgOiAiMjAxOC0xMC0wNVQyMDoxMDo0M1oiLAogICJyZWdpb24i",
- "IDogInVzLWVhc3QtMiIKfQAAAAAAADGCARcwggETAgEBMGkwXDELMAkGA1UEBhMCVVMxGTAXBgNV",
- "BAgTEFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0FtYXpvbiBX",
- "ZWIgU2VydmljZXMgTExDAgkAlrpI2eVeGmcwCQYFKw4DAhoFAKBdMBgGCSqGSIb3DQEJAzELBgkq",
- "hkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8XDTE4MTAwNTIwMTA0OFowIwYJKoZIhvcNAQkEMRYEFK0k",
- "Tz6n1A8/zU1AzFj0riNQORw2MAkGByqGSM44BAMELjAsAhRNrr174y98grPBVXUforN/6wZp8AIU",
- "JLZBkrB2GJA8A4WJ1okq++jSrBIAAAAAAAA="
+ "MIAGCSqGSIb3DQ...",
+ "REDACTED",
+ "AhQUgq0iPWqPTVnT96tZE6L1XjjLHQAAAAAAAA=="
],
"rsa2048": [
- "MIAGCSqGSIb3DQEHAqCAMIACAQExDzANBglghkgBZQMEAgEFADCABgkqhkiG9w0BBwGggCSABIIB",
- "23sKICAiZGV2cGF5UHJvZHVjdENvZGVzIiA6IG51bGwsCiAgIm1hcmtldHBsYWNlUHJvZHVjdENv",
- "ZGVzIiA6IG51bGwsCiAgInByaXZhdGVJcCIgOiAiMTAuNDEuNDEuOTUiLAogICJ2ZXJzaW9uIiA6",
- "ICIyMDE3LTA5LTMwIiwKICAiaW5zdGFuY2VJZCIgOiAiaS0wNzVmMDg4YzcyYWQzMjcxYyIsCiAg",
- "ImJpbGxpbmdQcm9kdWN0cyIgOiBudWxsLAogICJpbnN0YW5jZVR5cGUiIDogInQyLm1pY3JvIiwK",
- "ICAiYWNjb3VudElkIiA6ICI0Mzc1MjYwMDY5MjUiLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1",
- "cy1lYXN0LTJiIiwKICAia2VybmVsSWQiIDogbnVsbCwKICAicmFtZGlza0lkIiA6IG51bGwsCiAg",
- "ImFyY2hpdGVjdHVyZSIgOiAieDg2XzY0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLTA3OTYzOGFhZTcw",
- "NDZiZGQyIiwKICAicGVuZGluZ1RpbWUiIDogIjIwMTgtMTAtMDVUMjA6MTA6NDNaIiwKICAicmVn",
- "aW9uIiA6ICJ1cy1lYXN0LTIiCn0AAAAAAAAxggH/MIIB+wIBATBpMFwxCzAJBgNVBAYTAlVTMRkw",
- "FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6",
- "b24gV2ViIFNlcnZpY2VzIExMQwIJAM07oeX4xevdMA0GCWCGSAFlAwQCAQUAoGkwGAYJKoZIhvcN",
- "AQkDMQsGCSqGSIb3DQEHATAcBgkqhkiG9w0BCQUxDxcNMTgxMDA1MjAxMDQ4WjAvBgkqhkiG9w0B",
- "CQQxIgQgkYz0pZk3zJKBi4KP4egeOKJl/UYwu5UdE7id74pmPwMwDQYJKoZIhvcNAQEBBQAEggEA",
- "dC3uIGGNul1OC1mJKSH3XoBWsYH20J/xhIdftYBoXHGf2BSFsrs9ZscXd2rKAKea4pSPOZEYMXgz",
- "lPuT7W0WU89N3ZKviy/ReMSRjmI/jJmsY1lea6mlgcsJXreBXFMYucZvyeWGHdnCjamoKWXkmZlM",
- "mSB1gshWy8Y7DzoKviYPQZi5aI54XK2Upt4kGme1tH1NI2Cq+hM4K+adxTbNhS3uzvWaWzMklUuU",
- "QHX2GMmjAVRVc8vnA8IAsBCJJp+gFgYzi09IK+cwNgCFFPADoG6jbMHHf4sLB3MUGpiA+G9JlCnM",
- "fmkjI2pNRB8spc0k4UG4egqLrqCz67WuK38tjwAAAAAAAA=="
+ "MIAGCSqGSIb...",
+ "REDACTED",
+ "clYQvuE45xXm7Yreg3QtQbrP//owl1eZHj6s350AAAAAAAA="
],
"signature": [
- "Tsw6h+V3WnxrNVSXBYIOs1V4j95YR1mLPPH45XnhX0/Ei3waJqf7/7EEKGYP1Cr4PTYEULtZ7Mvf",
- "+xJpM50Ivs2bdF7o0c4vnplRWe3f06NI9pv50dr110j/wNzP4MZ1pLhJCqubQOaaBTF3LFutgRrt",
- "r4B0mN3p7EcqD8G+ll0="
+ "dA+QV+LLCWCRNddnrKleYmh2GvYo+t8urDkdgmDSsPi",
+ "REDACTED",
+ "kDT4ygyJLFkd3b4qjAs="
]
}
},
- "meta-data": {
- "ami-id": "ami-079638aae7046bdd2",
- "ami-launch-index": "0",
- "ami-manifest-path": "(unknown)",
- "block-device-mapping": {
+ "meta_data": {
+ "ami_id": "ami-02e8aa396f8be3b6d",
+ "ami_launch_index": "0",
+ "ami_manifest_path": "(unknown)",
+ "block_device_mapping": {
"ami": "/dev/sda1",
- "ephemeral0": "sdb",
- "ephemeral1": "sdc",
"root": "/dev/sda1"
},
- "hostname": "ip-10-41-41-95.us-east-2.compute.internal",
- "instance-action": "none",
- "instance-id": "i-075f088c72ad3271c",
- "instance-type": "t2.micro",
- "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal",
- "local-ipv4": "10.41.41.95",
- "mac": "06:74:8f:39:cd:a6",
+ "hostname": "ip-172-31-81-43.ec2.internal",
+ "instance_action": "none",
+ "instance_id": "i-0929128ff2f73a2f1",
+ "instance_type": "t2.micro",
+ "local_hostname": "ip-172-31-81-43.ec2.internal",
+ "local_ipv4": "172.31.81.43",
+ "mac": "12:7e:c9:93:29:af",
"metrics": {
"vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
},
"network": {
"interfaces": {
"macs": {
- "06:74:8f:39:cd:a6": {
- "device-number": "0",
- "interface-id": "eni-052058bbd7831eaae",
- "ipv4-associations": {
- "18.218.221.122": "10.41.41.95"
- },
- "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal",
- "local-ipv4s": "10.41.41.95",
- "mac": "06:74:8f:39:cd:a6",
- "owner-id": "437526006925",
- "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com",
- "public-ipv4s": "18.218.221.122",
- "security-group-ids": "sg-828247e9",
- "security-groups": "Cloud-init integration test secgroup",
- "subnet-id": "subnet-282f3053",
- "subnet-ipv4-cidr-block": "10.41.41.0/24",
- "subnet-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/64",
- "vpc-id": "vpc-252ef24d",
- "vpc-ipv4-cidr-block": "10.41.0.0/16",
- "vpc-ipv4-cidr-blocks": "10.41.0.0/16",
- "vpc-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/56"
- }
+ "12:7e:c9:93:29:af": {
+ "device_number": "0",
+ "interface_id": "eni-0c07a0474339b801d",
+ "ipv4_associations": {
+ "3.89.187.177": "172.31.81.43"
+ },
+ "local_hostname": "ip-172-31-81-43.ec2.internal",
+ "local_ipv4s": "172.31.81.43",
+ "mac": "12:7e:c9:93:29:af",
+ "owner_id": "329910648901",
+ "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com",
+ "public_ipv4s": "3.89.187.177",
+ "security_group_ids": "sg-0100038b68aa79986",
+ "security_groups": "launch-wizard-3",
+ "subnet_id": "subnet-04e2d12a",
+ "subnet_ipv4_cidr_block": "172.31.80.0/20",
+ "vpc_id": "vpc-210b4b5b",
+ "vpc_ipv4_cidr_block": "172.31.0.0/16",
+ "vpc_ipv4_cidr_blocks": "172.31.0.0/16"
+ }
}
}
},
"placement": {
- "availability-zone": "us-east-2b"
+ "availability_zone": "us-east-1b"
},
"profile": "default-hvm",
- "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com",
- "public-ipv4": "18.218.221.122",
- "public-keys": {
- "cloud-init-integration": [
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration"
- ]
- },
- "reservation-id": "r-0594a20e31f6cfe46",
- "security-groups": "Cloud-init integration test secgroup",
+ "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com",
+ "public_ipv4": "3.89.187.177",
+ "reservation_id": "r-0c481643d15766a02",
+ "security_groups": "launch-wizard-3",
"services": {
"domain": "amazonaws.com",
"partition": "aws"
}
}
},
+ "instance_id": "i-0929128ff2f73a2f1",
+ "kernel_release": "5.3.0-1010-aws",
+ "local_hostname": "ip-172-31-81-43",
+ "machine": "x86_64",
+ "platform": "ec2",
+ "public_ssh_keys": [],
+ "python_version": "3.7.6",
+ "region": "us-east-1",
"sensitive_keys": [],
+ "subplatform": "metadata (http://169.254.169.254)",
+ "sys_info": {
+ "dist": [
+ "ubuntu",
+ "20.04",
+ "focal"
+ ],
+ "platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal",
+ "python": "3.7.6",
+ "release": "5.3.0-1010-aws",
+ "system": "Linux",
+ "uname": [
+ "Linux",
+ "ip-172-31-81-43",
+ "5.3.0-1010-aws",
+ "#11-Ubuntu SMP Thu Jan 16 07:59:32 UTC 2020",
+ "x86_64",
+ "x86_64"
+ ],
+ "variant": "ubuntu"
+ },
+ "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal",
+ "userdata": "#cloud-config\nssh_import_id: [<my-launchpad-id>]\n...",
"v1": {
"_beta_keys": [
"subplatform"
],
- "availability-zone": "us-east-2b",
- "availability_zone": "us-east-2b",
+ "availability_zone": "us-east-1b",
"cloud_name": "aws",
- "instance_id": "i-075f088c72ad3271c",
- "local_hostname": "ip-10-41-41-95",
+ "distro": "ubuntu",
+ "distro_release": "focal",
+ "distro_version": "20.04",
+ "instance_id": "i-0929128ff2f73a2f1",
+ "kernel": "5.3.0-1010-aws",
+ "local_hostname": "ip-172-31-81-43",
+ "machine": "x86_64",
"platform": "ec2",
- "public_ssh_keys": [
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration"
- ],
- "region": "us-east-2",
- "subplatform": "metadata (http://169.254.169.254)"
- }
+ "public_ssh_keys": [],
+ "python": "3.7.6",
+ "region": "us-east-1",
+ "subplatform": "metadata (http://169.254.169.254)",
+ "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal",
+ "variant": "ubuntu"
+ },
+ "variant": "ubuntu",
+ "vendordata": ""
}
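These values can be read back on a running instance with the
``cloud-init query`` subcommand (a sketch; keys on the sensitive list, such as
``merged_cfg``, are only readable by root)::

    $ cloud-init query cloud_name
    aws
    $ sudo cloud-init query merged_cfg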
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index 9c9be804..e30fe0fe 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -6,6 +6,7 @@ Modules
*******
.. contents:: Table of Contents
+.. automodule:: cloudinit.config.cc_apk_configure
.. automodule:: cloudinit.config.cc_apt_configure
.. automodule:: cloudinit.config.cc_apt_pipelining
.. automodule:: cloudinit.config.cc_bootcmd
diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst
index 7f857550..c93e29be 100644
--- a/doc/rtd/topics/network-config-format-v2.rst
+++ b/doc/rtd/topics/network-config-format-v2.rst
@@ -50,9 +50,8 @@ currently being defined.
There are two physically/structurally different classes of device definitions,
and the ID field has a different interpretation for each:
-Physical devices
-
-: (Examples: ethernet, wifi) These can dynamically come and go between
+Physical devices (Examples: ethernet, wifi):
+ These can dynamically come and go between
reboots and even during runtime (hotplugging). In the generic case, they
can be selected by ``match:`` rules on desired properties, such as
name/name pattern, MAC address, driver, or device paths. In general these
@@ -69,9 +68,8 @@ Physical devices
which is only being used for references from definitions of compound
devices in the config.
-Virtual devices
-
-: (Examples: veth, bridge, bond) These are fully under the control of the
+Virtual devices (Examples: veth, bridge, bond):
+ These are fully under the control of the
config file(s) and the network stack. I. e. these devices are being created
instead of matched. Thus ``match:`` and ``set-name:`` are not applicable for
these, and the ID field is the name of the created virtual device.
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 1520ba9a..08db04d8 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -25,17 +25,23 @@ For example, OpenStack may provide network config in the MetaData Service.
**System Config**
-A ``network:`` entry in /etc/cloud/cloud.cfg.d/* configuration files.
+A ``network:`` entry in ``/etc/cloud/cloud.cfg.d/*`` configuration files.
**Kernel Command Line**
-``ip=`` or ``network-config=<YAML config string>``
+``ip=`` or ``network-config=<Base64 encoded YAML config string>``
User-data cannot change an instance's network configuration. In the absence
of network configuration in any of the above sources, `Cloud-init`_ will
write out a network configuration that will issue a DHCP request on a "first"
network interface.
+.. note::
+
+ The network-config value is expected to be a Base64 encoded YAML string in
+ :ref:`network_config_v1` or :ref:`network_config_v2` format. Optionally it
+ can be compressed with ``gzip`` prior to Base64 encoding.
+
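A sketch of producing such a value, assuming GNU coreutils ``base64`` (the
gzip step is optional); the resulting string is passed as
``network-config=<encoded string>`` on the kernel command line::

    $ base64 -w0 network-config.yaml
    $ gzip -c network-config.yaml | base64 -w0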
Disabling Network Configuration
===============================
@@ -48,19 +54,19 @@ on other methods, such as embedded configuration or other customizations.
**Kernel Command Line**
-`Cloud-init`_ will check for a parameter ``network-config`` and the
-value is expected to be YAML string in the :ref:`network_config_v1` format.
-The YAML string may optionally be ``Base64`` encoded, and optionally
-compressed with ``gzip``.
+`Cloud-init`_ will additionally check for the parameter
+``network-config=disabled``, which will automatically disable any network
+configuration.
Example disabling kernel command line entry: ::
- network-config={config: disabled}
+ network-config=disabled
**cloud config**
-In the combined cloud-init configuration dictionary. ::
+In the combined cloud-init configuration dictionary, merged from
+``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``::
network:
config: disabled
@@ -159,7 +165,7 @@ supported formats. The following ``renderers`` are supported in cloud-init:
- **ENI**
/etc/network/interfaces or ``ENI`` is supported by the ``ifupdown`` package
-found in Ubuntu and Debian.
+found in Alpine Linux, Debian and Ubuntu.
- **Netplan**
@@ -191,7 +197,7 @@ supplying an updated configuration in cloud-config. ::
system_info:
network:
- renderers: ['netplan', 'eni', 'sysconfig', 'freebsd']
+ renderers: ['netplan', 'eni', 'sysconfig', 'freebsd', 'netbsd', 'openbsd']
Network Configuration Tools
diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst
index aee3d7fc..f03b5969 100644
--- a/doc/rtd/topics/tests.rst
+++ b/doc/rtd/topics/tests.rst
@@ -467,11 +467,11 @@ Set region in platforms.yaml
.. code-block:: yaml
azurecloud:
- enabled: true
- region: West US 2
- vm_size: Standard_DS1_v2
- storage_sku: standard_lrs
- tag: ci
+ enabled: true
+ region: West US 2
+ vm_size: Standard_DS1_v2
+ storage_sku: standard_lrs
+ tag: ci
Architecture
@@ -546,38 +546,38 @@ The following demonstrates merge behavior:
.. code-block:: yaml
defaults:
- list_item:
- - list_entry_1
- - list_entry_2
- int_item_1: 123
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: 2
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: b
+ list_item:
+ - list_entry_1
+ - list_entry_2
+ int_item_1: 123
+ int_item_2: 234
+ dict_item:
+ subkey_1: 1
+ subkey_2: 2
+ subkey_dict:
+ subsubkey_1: a
+ subsubkey_2: b
overrides:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- dict_item:
- subkey_2: false
- subkey_dict:
- subsubkey_2: 'new value'
+ list_item:
+ - overridden_list_entry
+ int_item_1: 0
+ dict_item:
+ subkey_2: false
+ subkey_dict:
+ subsubkey_2: 'new value'
result:
- list_item:
- - overridden_list_entry
- int_item_1: 0
- int_item_2: 234
- dict_item:
- subkey_1: 1
- subkey_2: false
- subkey_dict:
- subsubkey_1: a
- subsubkey_2: 'new value'
+ list_item:
+ - overridden_list_entry
+ int_item_1: 0
+ int_item_2: 234
+ dict_item:
+ subkey_1: 1
+ subkey_2: false
+ subkey_dict:
+ subsubkey_1: a
+ subsubkey_2: 'new value'
Image Config