From 5b065316113b97aadb43e63cc31bb8639f6a6376 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 14 Dec 2018 03:24:26 +0000 Subject: Update to pylint 2.2.2. The tip-pylint tox target correctly reported the invalid use of string formatting. The change here is to: a.) Fix the error that was caught. b.) move to pylint 2.2.2 for the default 'pylint' target. --- cloudinit/sources/DataSourceAzure.py | 4 ++-- tox.ini | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index e076d5dc..46efca4a 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -980,8 +980,8 @@ def read_azure_ovf(contents): raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") if len(lpcs_nodes) > 1: raise BrokenAzureDataSource("found '%d' %ss" % - ("LinuxProvisioningConfigurationSet", - len(lpcs_nodes))) + (len(lpcs_nodes), + "LinuxProvisioningConfigurationSet")) lpcs = lpcs_nodes[0] if not lpcs.hasChildNodes(): diff --git a/tox.ini b/tox.ini index 2fb3209d..d983348b 100644 --- a/tox.ini +++ b/tox.ini @@ -21,7 +21,7 @@ setenv = basepython = python3 deps = # requirements - pylint==1.8.1 + pylint==2.2.2 # test-requirements because unit tests are now present in cloudinit tree -r{toxinidir}/test-requirements.txt commands = {envpython} -m pylint {posargs:cloudinit tests tools} -- cgit v1.2.3 From 26e95e95157d2dced6a8af9d766b93b7ae024d52 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 18 Dec 2018 22:19:57 +0000 Subject: HACKING.rst: change contact info to Josh Powers In the Hacking doc, change the contact information for the contributors agreement to reference Josh Powers rather than Scott Moser. --- HACKING.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 3bb555c2..fcdfa4fb 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -11,10 +11,10 @@ Do these things once * To contribute, you must sign the Canonical `contributor license agreement`_ - If you have already signed it as an individual, your Launchpad user will be listed in the `contributor-agreement-canonical`_ group. Unfortunately there is no easy way to check if an organization or company you are doing work for has signed. If you are unsure or have questions, email `Scott Moser `_ or ping smoser in ``#cloud-init`` channel via freenode. + If you have already signed it as an individual, your Launchpad user will be listed in the `contributor-agreement-canonical`_ group. Unfortunately there is no easy way to check if an organization or company you are doing work for has signed. If you are unsure or have questions, email `Josh Powers `_ or ping powersj in ``#cloud-init`` channel via freenode. When prompted for 'Project contact' or 'Canonical Project Manager' enter - 'Scott Moser'. + 'Josh Powers'. * Configure git with your email and name for commit messages. -- cgit v1.2.3 From f55bb17ddb2fd64e039057bf7ee50951a0dc93e8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Dec 2018 17:22:45 +0000 Subject: Vmware: Add support for the com.vmware.guestInfo OVF transport. This adds support for reading OVF information over the 'com.vmware.guestInfo' tranport. The current implementation requires vmware-rpctool be installed in the system. 
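As an aside for readers, a minimal standalone sketch of the probe this message describes, using only the standard library; this is not the patch's code (which uses cloud-init's util.which/util.subp helpers) and the function name is illustrative:

import shutil
import subprocess

def probe_guestinfo_ovf_env():
    """Return ovf-env.xml content from vmware-rpctool, or None if absent."""
    if not shutil.which("vmware-rpctool"):
        return None  # tool not installed, so this transport is unavailable
    proc = subprocess.run(
        ["vmware-rpctool", "info-get guestinfo.ovfEnv"],
        capture_output=True, text=True)
    # rpctool exits 1 when guestinfo.ovfEnv is simply not set
    if proc.returncode == 0 and proc.stdout:
        return proc.stdout
    return None
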
LP: #1807466 --- cloudinit/sources/DataSourceOVF.py | 34 ++++++++------ tests/unittests/test_datasource/test_ovf.py | 72 ++++++++++++++++++++++++++--- tests/unittests/test_ds_identify.py | 15 ++++++ tools/ds-identify | 21 +++++++++ 4 files changed, 121 insertions(+), 21 deletions(-) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 045291e7..891d6547 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -232,10 +232,10 @@ class DataSourceOVF(sources.DataSource): GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) else: - np = {'iso': transport_iso9660, - 'vmware-guestd': transport_vmware_guestd, } + np = [('com.vmware.guestInfo', transport_vmware_guestinfo), + ('iso', transport_iso9660)] name = None - for (name, transfunc) in np.items(): + for name, transfunc in np: (contents, _dev, _fname) = transfunc() if contents: break @@ -503,18 +503,22 @@ def transport_iso9660(require_iso=True): return (False, None, None) -def transport_vmware_guestd(): - # http://blogs.vmware.com/vapp/2009/07/ \ - # selfconfiguration-and-the-ovf-environment.html - # try: - # cmd = ['vmware-guestd', '--cmd', 'info-get guestinfo.ovfEnv'] - # (out, err) = subp(cmd) - # return(out, 'guestinfo.ovfEnv', 'vmware-guestd') - # except: - # # would need to error check here and see why this failed - # # to know if log/error should be raised - # return(False, None, None) - return (False, None, None) +def transport_vmware_guestinfo(): + rpctool = "vmware-rpctool" + not_found = (False, None, None) + if not util.which(rpctool): + return not_found + cmd = [rpctool, "info-get guestinfo.ovfEnv"] + try: + out, _err = util.subp(cmd) + if out: + return (out, rpctool, "guestinfo.ovfEnv") + LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out) + except util.ProcessExecutionError as e: + if e.exit_code != 1: + LOG.warning("%s exited with code %d", rpctool, e.exit_code) + LOG.debug(e) + return not_found def find_child(node, filter_func): diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index a226c032..e4af0fa3 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -17,6 +17,8 @@ from cloudinit.sources import DataSourceOVF as dsovf from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptNotFound) +MPATH = 'cloudinit.sources.DataSourceOVF.' + OVF_ENV_CONTENT = """ \n/dev/null 2>&1 || return 1 + local out="" ret="" + out=$(vmware-rpctool "info-get guestinfo.ovfEnv" 2>&1) + ret=$? + if [ $ret -ne 0 ]; then + debug 1 "Running on vmware but rpctool query returned $ret: $out" + return 1 + fi + case "$1" in + "=label,=label2 # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then -- cgit v1.2.3 From d4d11c78e5e78999356fd0c3d124b5a298735b65 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Dec 2018 20:52:05 +0000 Subject: OVF: simplify expected return values of transport functions. Transport functions (transport_iso9660 and transport_vmware_guestinfo) would return a tuple of 3 values, but only the first was ever used outside of test. The other values (device and filename) were just ignored. This just simplifies the transport functions to now return content (in string format) or None indicating that the transport was not found. 
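The simplified contract can be summarized with a short, hedged sketch (names illustrative): each transport is a zero-argument callable returning the ovf-env.xml content as a string, or None when not found, so the caller reduces to a loop:

def first_ovf_env(transports):
    # transports: ordered list of (name, callable) pairs, e.g.
    # [('com.vmware.guestInfo', transport_vmware_guestinfo),
    #  ('iso', transport_iso9660)]
    for name, transport in transports:
        contents = transport()
        if contents:
            return name, contents
    return None, None
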
--- cloudinit/sources/DataSourceOVF.py | 20 ++++----- tests/unittests/test_datasource/test_ovf.py | 70 ++++++++++------------------- 2 files changed, 33 insertions(+), 57 deletions(-) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 891d6547..3a3fcdf6 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -236,7 +236,7 @@ class DataSourceOVF(sources.DataSource): ('iso', transport_iso9660)] name = None for name, transfunc in np: - (contents, _dev, _fname) = transfunc() + contents = transfunc() if contents: break if contents: @@ -464,8 +464,8 @@ def maybe_cdrom_device(devname): return cdmatch.match(devname) is not None -# Transport functions take no input and return -# a 3 tuple of content, path, filename +# Transport functions are called with no arguments and return +# either None (indicating not present) or string content of an ovf-env.xml def transport_iso9660(require_iso=True): # Go through mounts to see if it was already mounted @@ -477,9 +477,9 @@ def transport_iso9660(require_iso=True): if not maybe_cdrom_device(dev): continue mp = info['mountpoint'] - (fname, contents) = get_ovf_env(mp) + (_fname, contents) = get_ovf_env(mp) if contents is not False: - return (contents, dev, fname) + return contents if require_iso: mtype = "iso9660" @@ -492,27 +492,27 @@ def transport_iso9660(require_iso=True): if maybe_cdrom_device(dev)] for dev in devs: try: - (fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype) + (_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype) except util.MountFailedError: LOG.debug("%s not mountable as iso9660", dev) continue if contents is not False: - return (contents, dev, fname) + return contents - return (False, None, None) + return None def transport_vmware_guestinfo(): rpctool = "vmware-rpctool" - not_found = (False, None, None) + not_found = None if not util.which(rpctool): return not_found cmd = [rpctool, "info-get guestinfo.ovfEnv"] try: out, _err = util.subp(cmd) if out: - return (out, rpctool, "guestinfo.ovfEnv") + return out LOG.debug("cmd %s exited 0 with empty stdout: %s", cmd, out) except util.ProcessExecutionError as e: if e.exit_code != 1: diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index e4af0fa3..349d54cc 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -19,6 +19,8 @@ from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( MPATH = 'cloudinit.sources.DataSourceOVF.' +NOT_FOUND = None + OVF_ENV_CONTENT = """ Date: Thu, 20 Dec 2018 21:49:09 +0000 Subject: Scaleway: Support ssh keys provided inside an instance tag. The change here will utilize ssh keys found inside an instance's tag. The tag value must start with 'AUTHORIZED_KEY'. 
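A minimal sketch of the tag parsing this message describes, based on the test fixtures below (keys ride in tag values after an 'AUTHORIZED_KEY=' prefix, with underscores standing in for spaces); note that the slice tag[plen:], which drops the prefix, is what yields the key:

def keys_from_tags(tags):
    prefix = "AUTHORIZED_KEY="
    plen = len(prefix)
    keys = []
    for tag in tags:
        if not tag.startswith(prefix):
            continue
        # drop the prefix, then map '_' back to ' '
        keys.append(tag[plen:].replace("_", " "))
    return keys

# keys_from_tags(["AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD"])
# -> ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD']
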
--- cloudinit/sources/DataSourceScaleway.py | 11 +++- tests/unittests/test_datasource/test_scaleway.py | 76 ++++++++++++++++++++++-- 2 files changed, 82 insertions(+), 5 deletions(-) diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 9dc4ab23..b573b382 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -253,7 +253,16 @@ class DataSourceScaleway(sources.DataSource): return self.metadata['id'] def get_public_ssh_keys(self): - return [key['key'] for key in self.metadata['ssh_public_keys']] + ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']] + + akeypre = "AUTHORIZED_KEY=" + plen = len(akeypre) + for tag in self.metadata.get('tags', []): + if not tag.startswith(akeypre): + continue + ssh_keys.append(tag[:plen].replace("_", " ")) + + return ssh_keys def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): return self.metadata['hostname'] diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index c2bc7a00..f96bf0a2 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -49,6 +49,9 @@ class MetadataResponses(object): FAKE_METADATA = { 'id': '00000000-0000-0000-0000-000000000000', 'hostname': 'scaleway.host', + 'tags': [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ], 'ssh_public_keys': [{ 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', 'fingerprint': '2048 06:ae:... login (RSA)' @@ -204,10 +207,11 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertEqual(self.datasource.get_instance_id(), MetadataResponses.FAKE_METADATA['id']) - self.assertEqual(self.datasource.get_public_ssh_keys(), [ - elem['key'] for elem in - MetadataResponses.FAKE_METADATA['ssh_public_keys'] - ]) + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) self.assertEqual(self.datasource.get_hostname(), MetadataResponses.FAKE_METADATA['hostname']) self.assertEqual(self.datasource.get_userdata_raw(), @@ -218,6 +222,70 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertIsNone(self.datasource.region) self.assertEqual(sleep.call_count, 0) + def test_ssh_keys_empty(self): + """ + get_public_ssh_keys() should return empty list if no ssh key are + available + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys(), []) + + def test_ssh_keys_only_tags(self): + """ + get_public_ssh_keys() should return list of keys available in tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC", + ] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + ].sort()) + + def test_ssh_keys_only_conf(self): + """ + get_public_ssh_keys() should return list of keys available in + ssh_public_keys field + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... 
login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + + def test_ssh_keys_both(self): + """ + get_public_ssh_keys() should return a merge of keys available + in ssh_public_keys and tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] + + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', get_source_address_adapter) -- cgit v1.2.3 From 12bc76cebf69a1c8cf9eba78431333842ed170cf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Dec 2018 23:51:09 +0000 Subject: ds-identify: fix wrong variable name in ovf_vmware_transport_guestinfo. ovf_vmware_transport_guestinfo is not currently tested. It used '$1' instead of '$out' when checking for xml content in the output of vmware-rpctool. --- tools/ds-identify | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/ds-identify b/tools/ds-identify index c61f18ae..b78b2731 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -736,7 +736,7 @@ ovf_vmware_transport_guestinfo() { debug 1 "Running on vmware but rpctool query returned $ret: $out" return 1 fi - case "$1" in + case "$out" in " Date: Tue, 8 Jan 2019 04:52:45 +0000 Subject: doc: clean up some datasource documentation. The change to datasources.rst here is obvious typo fix. The change to azure is to reduce the two 'Customization' sections to a single and clean up some other duplicate text. --- doc/rtd/topics/datasources.rst | 2 +- doc/rtd/topics/datasources/azure.rst | 61 +++++++++++++----------------------- 2 files changed, 22 insertions(+), 41 deletions(-) diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index e34f145c..5abbaefd 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -18,7 +18,7 @@ single way to access the different cloud systems methods to provide this data through the typical usage of subclasses. Any metadata processed by cloud-init's datasources is persisted as -``/run/cloud0-init/instance-data.json``. Cloud-init provides tooling +``/run/cloud-init/instance-data.json``. Cloud-init provides tooling to quickly introspect some of that data. See :ref:`instance_metadata` for more information. diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index f73c3694..720a475c 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -23,18 +23,18 @@ information in json format to /run/cloud-init/dhclient.hook/.json. In order for cloud-init to leverage this method to find the endpoint, the cloud.cfg file must contain: -datasource: - Azure: - set_hostname: False - agent_command: __builtin__ +.. 
sourcecode:: yaml + + datasource: + Azure: + set_hostname: False + agent_command: __builtin__ If those files are not available, the fallback is to check the leases file for the endpoint server (again option 245). You can define the path to the lease file with the 'dhclient_lease_file' -configuration. The default value is /var/lib/dhcp/dhclient.eth0.leases. - - dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases +configuration. walinuxagent ------------ @@ -60,7 +60,7 @@ in order to use waagent.conf with cloud-init, the following settings are recomme Configuration ------------- The following configuration can be set for the datasource in system -configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). +configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). The settings that may be configured are: @@ -76,13 +76,25 @@ The settings that may be configured are: * **disk_aliases**: A dictionary defining which device paths should be interpreted as ephemeral images. See cc_disk_setup module for more info. * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to - metadata changes. + metadata changes. The '``hostname_bounce: command``' entry can be either + the literal string 'builtin' or a command to execute. The command will be + invoked after the hostname is set, and will have the 'interface' in its + environment. If ``set_hostname`` is not true, then ``hostname_bounce`` + will be ignored. An example might be: + + ``command: ["sh", "-c", "killall dhclient; dhclient $interface"]`` + * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to metadata changes. Azure will throttle ifup/down in some cases after metadata has been updated to inform dhcp server about updated hostnames. * **set_hostname**: Boolean set to True when we want Azure to set the hostname based on metadata. +Configuration for the datasource can also be read from a +``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. Content in +dscfg node is expected to be base64 encoded yaml content, and it will be +merged into the 'datasource: Azure' entry. + An example configuration with the default values is provided below: .. sourcecode:: yaml @@ -143,37 +155,6 @@ Example: -Configuration -------------- -Configuration for the datasource can be read from the system config's or set -via the `dscfg` entry in the `LinuxProvisioningConfigurationSet`. Content in -dscfg node is expected to be base64 encoded yaml content, and it will be -merged into the 'datasource: Azure' entry. - -The '``hostname_bounce: command``' entry can be either the literal string -'builtin' or a command to execute. The command will be invoked after the -hostname is set, and will have the 'interface' in its environment. If -``set_hostname`` is not true, then ``hostname_bounce`` will be ignored. - -An example might be: - command: ["sh", "-c", "killall dhclient; dhclient $interface"] - -.. code:: yaml - - datasource: - agent_command - Azure: - agent_command: [service, walinuxagent, start] - set_hostname: True - hostname_bounce: - # the name of the interface to bounce - interface: eth0 - # policy can be 'on', 'off' or 'force' - policy: on - # the method 'bounce' command. - command: "builtin" - hostname_command: "hostname" - hostname -------- When the user launches an instance, they provide a hostname for that instance. 
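As a worked example of the dscfg mechanism described in the documentation above, a small sketch of producing a base64-encoded value for the dscfg node; the yaml fragment is illustrative, and per the docs it is merged into the 'datasource: Azure' entry:

import base64

yaml_fragment = "set_hostname: false\nagent_command: __builtin__\n"
dscfg_value = base64.b64encode(yaml_fragment.encode()).decode()
print(dscfg_value)  # base64 text suitable for the dscfg node
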
-- cgit v1.2.3 From 5f49ee0f3bdc9b3ebcc71b344b3918d4ef58c989 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 8 Jan 2019 18:46:44 +0000 Subject: Add documentation on adding a datasource. This adds documentation intended for a developer on how to add a new datasource to cloud-init. --- doc/rtd/topics/datasources.rst | 59 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 5abbaefd..648c6068 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -80,6 +80,65 @@ The current interface that a datasource object must provide is the following: def get_package_mirror_info(self) +Adding a new Datasource +----------------------- +The datasource objects have a few touch points with cloud-init. If you +are interested in adding a new datasource for your cloud platform you'll +need to take care of the following items: + +* **Identify a mechanism for positive identification of the platform**: + It is good practice for a cloud platform to positively identify itself + to the guest. This allows the guest to make educated decisions based + on the platform on which it is running. On the x86 and arm64 architectures, + many clouds identify themselves through DMI data. For example, + Oracle's public cloud provides the string 'OracleCloud.com' in the + DMI chassis-asset field. + + cloud-init enabled images produce a log file with details about the + platform. Reading through this log in ``/run/cloud-init/ds-identify.log`` + may provide the information needed to uniquely identify the platform. + If the log is not present, you can generate it by running from source + ``./tools/ds-identify`` or the installed location + ``/usr/lib/cloud-init/ds-identify``. + + The mechanism used to identify the platform will be required for the + ds-identify and datasource module sections below. + +* **Add datasource module ``cloudinit/sources/DataSource<CloudPlatform>.py``**: + It is suggested that you start by copying one of the simpler datasources + such as DataSourceHetzner (a minimal skeleton of such a module is + sketched below, after this patch). + +* **Add tests for datasource module**: + Add a new file with some tests for the module to + ``cloudinit/sources/test_<yourplatform>.py``. For example see + ``cloudinit/sources/tests/test_oracle.py`` + +* **Update ds-identify**: In systemd systems, ds-identify is used to detect + which datasource should be enabled or if cloud-init should run at all. + You'll need to make changes to ``tools/ds-identify``. + +* **Add tests for ds-identify**: Add relevant tests in a new class to + ``tests/unittests/test_ds_identify.py``. You can use ``TestOracle`` as an + example. + +* **Add your datasource name to the builtin list of datasources:** Add + your datasource module name to the end of the ``datasource_list`` + entry in ``cloudinit/settings.py``. + +* **Add your cloud platform to apport collection prompts:** Update the + list of cloud platforms in ``cloudinit/apport.py``. This list will be + provided to the user who invokes ``ubuntu-bug cloud-init``. + +* **Enable datasource by default in ubuntu packaging branches:** + Ubuntu packaging branches contain a template file + ``debian/cloud-init.templates`` that ultimately sets the default + datasource_list when installed via package. This file needs updating when + the commit gets into a package. + +* **Add documentation for your datasource**: You should add a new + file in ``doc/datasources/<cloudplatform>.rst`` + + Datasource Documentation ======================== The following is a list of the implemented datasources.
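The module skeleton referenced in the checklist above might look like the following; this is an illustrative sketch, not a real in-tree datasource, with the platform name and detection helper invented for the example:

from cloudinit import sources

def platform_looks_right():
    # Hypothetical detection helper; a real one would check e.g. a DMI
    # field, per the 'positive identification' step above.
    return False

class DataSourceExample(sources.DataSource):

    dsname = 'Example'  # hypothetical platform name

    def _get_data(self):
        # Identify the platform first; on success fill metadata/userdata
        # and return True so cloud-init selects this datasource.
        if not platform_looks_right():
            return False
        self.metadata = {'instance-id': 'i-example'}
        self.userdata_raw = b''
        return True

# Used to match classes to dependencies.
datasources = [(DataSourceExample, (sources.DEP_FILESYSTEM,))]

def get_datasource_list(depends):
    return sources.list_from_depends(depends, datasources)
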
-- cgit v1.2.3 From f19dc8fa62d4fd8de33311c3c75c5b6da440bebe Mon Sep 17 00:00:00 2001 From: Jason Zions Date: Tue, 15 Jan 2019 17:05:47 +0000 Subject: [Azure] Increase retries when talking to Wireserver during metadata walk Testing startup of large numbers of VMs (of varying distros) in Azure shows that 3 retries results in a small percentage of failed VMs. Increasing that by a few dramatically decreases the occurrence of provisioning timeout errors. The initial choice of "3 retries" was uninformed by heavy testing. Also, the alternate provisioning mechanism for Azure (waagent) retries the Wireserver crawl without limit. 10 retries seems a more reasonable choice. --- cloudinit/sources/DataSourceAzure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 46efca4a..a4f998b3 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -416,7 +416,7 @@ class DataSourceAzure(sources.DataSource): raise sources.InvalidMetaDataException(msg) ret = self._reprovision() imds_md = get_metadata_from_imds( - self.fallback_interface, retries=3) + self.fallback_interface, retries=10) (md, userdata_raw, cfg, files) = ret self.seed = cdev crawled_data.update({ -- cgit v1.2.3 From fdadcb5fae51f4e6799314ab98e3aec56c79b17c Mon Sep 17 00:00:00 2001 From: Jason Zions Date: Tue, 15 Jan 2019 21:37:17 +0000 Subject: net: Wait for dhclient to daemonize before reading lease file cloud-init uses dhclient to fetch the DHCP lease so it can extract DHCP options. dhclient creates the leasefile, then writes to it; simply waiting for the leasefile to appear creates a race between dhclient and cloud-init. Instead, wait for dhclient to be parented by init. At that point, we know it has written to the leasefile, so it's safe to copy the file and kill the process. cloud-init creates a temporary directory in which to execute dhclient, and deletes that directory after it has killed the process. If cloud-init abandons waiting for dhclient to daemonize, it will still attempt to delete the temporary directory, but will not report an exception should that attempt fail. 
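A standalone sketch of the wait described above: poll the pid file, then /proc, until the process has been reparented to init (ppid 1) before reading the lease file. The retry counts mirror the patch below (1000 tries, 0.01s naps); the helper name is illustrative and, like the patch, it assumes the comm field in /proc/<pid>/stat contains no spaces:

import time

def wait_for_daemonized(pid_file, tries=1000, naplen=0.01):
    for _ in range(tries):
        try:
            pid = int(open(pid_file).read().strip())
            # field 4 of /proc/<pid>/stat is the parent pid, see proc(5)
            stat = open('/proc/%d/stat' % pid).read()
            ppid = int(stat.split(' ', 4)[3])
        except (OSError, ValueError):
            pass  # pid file or /proc entry not ready yet
        else:
            if ppid == 1:
                return pid  # daemonized; safe to read the lease file
        time.sleep(naplen)
    return None
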
LP: #1794399 --- cloudinit/net/dhcp.py | 44 +++++++++++++++++++++++++++----------- cloudinit/net/tests/test_dhcp.py | 15 ++++++++++--- cloudinit/temp_utils.py | 4 ++-- cloudinit/tests/test_temp_utils.py | 18 +++++++++++++++- cloudinit/util.py | 16 ++++++++++++++ tests/unittests/test_util.py | 6 ++++++ 6 files changed, 84 insertions(+), 19 deletions(-) diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 0db991db..c98a97cd 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -9,6 +9,7 @@ import logging import os import re import signal +import time from cloudinit.net import ( EphemeralIPv4Network, find_fallback_nic, get_devicelist, @@ -127,7 +128,9 @@ def maybe_perform_dhcp_discovery(nic=None): if not dhclient_path: LOG.debug('Skip dhclient configuration: No dhclient command found.') return [] - with temp_utils.tempdir(prefix='cloud-init-dhcp-', needs_exe=True) as tdir: + with temp_utils.tempdir(rmtree_ignore_errors=True, + prefix='cloud-init-dhcp-', + needs_exe=True) as tdir: # Use /var/tmp because /run/cloud-init/tmp is mounted noexec return dhcp_discovery(dhclient_path, nic, tdir) @@ -195,24 +198,39 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir): '-pf', pid_file, interface, '-sf', '/bin/true'] util.subp(cmd, capture=True) - # dhclient doesn't write a pid file until after it forks when it gets a - # proper lease response. Since cleandir is a temp directory that gets - # removed, we need to wait for that pidfile creation before the - # cleandir is removed, otherwise we get FileNotFound errors. + # Wait for pid file and lease file to appear, and for the process + # named by the pid file to daemonize (have pid 1 as its parent). If we + # try to read the lease file before daemonization happens, we might try + # to read it before the dhclient has actually written it. We also have + # to wait until the dhclient has become a daemon so we can be sure to + # kill the correct process, thus freeing cleandir to be deleted back + # up the callstack. 
missing = util.wait_for_files( [pid_file, lease_file], maxwait=5, naplen=0.01) if missing: LOG.warning("dhclient did not produce expected files: %s", ', '.join(os.path.basename(f) for f in missing)) return [] - pid_content = util.load_file(pid_file).strip() - try: - pid = int(pid_content) - except ValueError: - LOG.debug( - "pid file contains non-integer content '%s'", pid_content) - else: - os.kill(pid, signal.SIGKILL) + + ppid = 'unknown' + for _ in range(0, 1000): + pid_content = util.load_file(pid_file).strip() + try: + pid = int(pid_content) + except ValueError: + pass + else: + ppid = util.get_proc_ppid(pid) + if ppid == 1: + LOG.debug('killing dhclient with pid=%s', pid) + os.kill(pid, signal.SIGKILL) + return parse_dhcp_lease_file(lease_file) + time.sleep(0.01) + + LOG.error( + 'dhclient(pid=%s, parentpid=%s) failed to daemonize after %s seconds', + pid_content, ppid, 0.01 * 1000 + ) return parse_dhcp_lease_file(lease_file) diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index cd3e7328..79e8842f 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -145,16 +145,20 @@ class TestDHCPDiscoveryClean(CiTestCase): 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) self.assertIn( - "pid file contains non-integer content ''", self.logs.getvalue()) + "dhclient(pid=, parentpid=unknown) failed " + "to daemonize after 10.0 seconds", + self.logs.getvalue()) m_kill.assert_not_called() + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.wait_for_files') @mock.patch('cloudinit.net.dhcp.util.subp') def test_dhcp_discovery_run_in_sandbox_waits_on_lease_and_pid(self, m_subp, m_wait, - m_kill): + m_kill, + m_getppid): """dhcp_discovery waits for the presence of pidfile and dhcp.leases.""" tmpdir = self.tmp_dir() dhclient_script = os.path.join(tmpdir, 'dhclient.orig') @@ -164,6 +168,7 @@ class TestDHCPDiscoveryClean(CiTestCase): pidfile = self.tmp_path('dhclient.pid', tmpdir) leasefile = self.tmp_path('dhcp.leases', tmpdir) m_wait.return_value = [pidfile] # Return the missing pidfile wait for + m_getppid.return_value = 1 # Indicate that dhclient has daemonized self.assertEqual([], dhcp_discovery(dhclient_script, 'eth9', tmpdir)) self.assertEqual( mock.call([pidfile, leasefile], maxwait=5, naplen=0.01), @@ -173,9 +178,10 @@ class TestDHCPDiscoveryClean(CiTestCase): self.logs.getvalue()) m_kill.assert_not_called() + @mock.patch('cloudinit.net.dhcp.util.get_proc_ppid') @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.subp') - def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill): + def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill, m_getppid): """dhcp_discovery brings up the interface and runs dhclient. It also returns the parsed dhcp.leases file generated in the sandbox. 
@@ -197,6 +203,7 @@ class TestDHCPDiscoveryClean(CiTestCase): pid_file = os.path.join(tmpdir, 'dhclient.pid') my_pid = 1 write_file(pid_file, "%d\n" % my_pid) + m_getppid.return_value = 1 # Indicate that dhclient has daemonized self.assertItemsEqual( [{'interface': 'eth9', 'fixed-address': '192.168.2.74', @@ -355,3 +362,5 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): self.assertEqual(fake_lease, lease) # Ensure that dhcp discovery occurs m_dhcp.called_once_with() + +# vi: ts=4 expandtab diff --git a/cloudinit/temp_utils.py b/cloudinit/temp_utils.py index c98a1b53..346276ec 100644 --- a/cloudinit/temp_utils.py +++ b/cloudinit/temp_utils.py @@ -81,7 +81,7 @@ def ExtendedTemporaryFile(**kwargs): @contextlib.contextmanager -def tempdir(**kwargs): +def tempdir(rmtree_ignore_errors=False, **kwargs): # This seems like it was only added in python 3.2 # Make it since its useful... # See: http://bugs.python.org/file12970/tempdir.patch @@ -89,7 +89,7 @@ def tempdir(**kwargs): try: yield tdir finally: - shutil.rmtree(tdir) + shutil.rmtree(tdir, ignore_errors=rmtree_ignore_errors) def mkdtemp(**kwargs): diff --git a/cloudinit/tests/test_temp_utils.py b/cloudinit/tests/test_temp_utils.py index ffbb92cd..4a52ef89 100644 --- a/cloudinit/tests/test_temp_utils.py +++ b/cloudinit/tests/test_temp_utils.py @@ -2,8 +2,9 @@ """Tests for cloudinit.temp_utils""" -from cloudinit.temp_utils import mkdtemp, mkstemp +from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir from cloudinit.tests.helpers import CiTestCase, wrap_and_call +import os class TestTempUtils(CiTestCase): @@ -98,4 +99,19 @@ class TestTempUtils(CiTestCase): self.assertEqual('/fake/return/path', retval) self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) + def test_tempdir_error_suppression(self): + """test tempdir suppresses errors during directory removal.""" + + with self.assertRaises(OSError): + with tempdir(prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # As a result, the directory is already gone, + # so shutil.rmtree should raise OSError + + with tempdir(rmtree_ignore_errors=True, + prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # Since the directory is already gone, shutil.rmtree would raise + # OSError, but we suppress that + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 7800f7bc..a8a232b6 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2876,4 +2876,20 @@ def udevadm_settle(exists=None, timeout=None): return subp(settle_cmd) +def get_proc_ppid(pid): + """ + Return the parent pid of a process. + """ + ppid = 0 + try: + contents = load_file("/proc/%s/stat" % pid, quiet=True) + except IOError as e: + LOG.warning('Failed to load /proc/%s/stat. %s', pid, e) + if contents: + parts = contents.split(" ", 4) + # man proc says + # ppid %d (4) The PID of the parent. 
+ ppid = int(parts[3]) + return ppid + # vi: ts=4 expandtab diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 5a14479a..0e71db82 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1171,4 +1171,10 @@ class TestGetProcEnv(helpers.TestCase): self.assertEqual({}, util.get_proc_env(1)) self.assertEqual(1, m_load_file.call_count) + def test_get_proc_ppid(self): + """get_proc_ppid returns correct parent pid value.""" + my_pid = os.getpid() + my_ppid = os.getppid() + self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) + # vi: ts=4 expandtab -- cgit v1.2.3 From ad170db966492e845b9dc23346cc7297e8a99032 Mon Sep 17 00:00:00 2001 From: Marlin Cremers Date: Tue, 15 Jan 2019 23:22:05 +0000 Subject: cc_set_passwords: Fix regex when parsing hashed passwords Correct invalid regex to match hashes starting with the following: - $1, $2a, $2y, $5 or $6 LP: #1811446 --- cloudinit/config/cc_set_passwords.py | 2 +- cloudinit/config/tests/test_set_passwords.py | 40 ++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 5ef97376..4585e4d3 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -160,7 +160,7 @@ def handle(_name, cfg, cloud, log, args): hashed_users = [] randlist = [] users = [] - prog = re.compile(r'\$[1,2a,2y,5,6](\$.+){2}') + prog = re.compile(r'\$(1|2a|2y|5|6)(\$.+){2}') for line in plist: u, p = line.split(':', 1) if prog.match(p) is not None and ":" not in p: diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index b051ec82..a2ea5ec4 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -68,4 +68,44 @@ class TestHandleSshPwauth(CiTestCase): m_update.assert_called_with({optname: optval}) m_subp.assert_not_called() + +class TestSetPasswordsHandle(CiTestCase): + """Test cc_set_passwords.handle""" + + with_logs = True + + def test_handle_on_empty_config(self): + """handle logs that no password has changed when config is empty.""" + cloud = self.tmp_cloud(distro='ubuntu') + setpass.handle( + 'IGNORED', cfg={}, cloud=cloud, log=self.logger, args=[]) + self.assertEqual( + "DEBUG: Leaving ssh config 'PasswordAuthentication' unchanged. 
" + 'ssh_pwauth=None\n', + self.logs.getvalue()) + + @mock.patch(MODPATH + "util.subp") + def test_handle_on_chpasswd_list_parses_common_hashes(self, m_subp): + """handle parses command password hashes.""" + cloud = self.tmp_cloud(distro='ubuntu') + valid_hashed_pwds = [ + 'root:$2y$10$8BQjxjVByHA/Ee.O1bCXtO8S7Y5WojbXWqnqYpUW.BrPx/' + 'Dlew1Va', + 'ubuntu:$6$5hOurLPO$naywm3Ce0UlmZg9gG2Fl9acWCVEoakMMC7dR52q' + 'SDexZbrN9z8yHxhUM2b.sxpguSwOlbOQSW/HpXazGGx3oo1'] + cfg = {'chpasswd': {'list': valid_hashed_pwds}} + with mock.patch(MODPATH + 'util.subp') as m_subp: + setpass.handle( + 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) + self.assertIn( + 'DEBUG: Handling input for chpasswd as list.', + self.logs.getvalue()) + self.assertIn( + "DEBUG: Setting hashed password for ['root', 'ubuntu']", + self.logs.getvalue()) + self.assertEqual( + [mock.call(['chpasswd', '-e'], + '\n'.join(valid_hashed_pwds) + '\n')], + m_subp.call_args_list) + # vi: ts=4 expandtab -- cgit v1.2.3 From 3861102fcaf47a882516d8b6daab518308eb3086 Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Fri, 18 Jan 2019 15:36:19 +0000 Subject: net: Make sysconfig renderer compatible with Network Manager. The 'sysconfig' renderer is activated if, and only if, there's ifup and ifdown commands present in its search dictonary or the network-scripts configuration files are found. This patch adds a check for Network- Manager configuration file as well. This solution is based on the use of the plugin 'ifcfg-rh' present in Network-Manager and is designed to support Fedora 29 or other distributions that also replaced network-scripts by Network-Manager. --- cloudinit/net/sysconfig.py | 36 +++++++++++++++++++++++ tests/unittests/test_net.py | 71 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 17293e1d..ae41f7b3 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -10,11 +10,14 @@ from cloudinit.distros.parsers import resolv_conf from cloudinit import log as logging from cloudinit import util +from configobj import ConfigObj + from . import renderer from .network_state import ( is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6) LOG = logging.getLogger(__name__) +NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" def _make_header(sep='#'): @@ -46,6 +49,24 @@ def _quote_value(value): return value +def enable_ifcfg_rh(path): + """Add ifcfg-rh to NetworkManager.cfg plugins if main section is present""" + config = ConfigObj(path) + if 'main' in config: + if 'plugins' in config['main']: + if 'ifcfg-rh' in config['main']['plugins']: + return + else: + config['main']['plugins'] = [] + + if isinstance(config['main']['plugins'], list): + config['main']['plugins'].append('ifcfg-rh') + else: + config['main']['plugins'] = [config['main']['plugins'], 'ifcfg-rh'] + config.write() + LOG.debug('Enabled ifcfg-rh NetworkManager plugins') + + class ConfigMap(object): """Sysconfig like dictionary object.""" @@ -657,6 +678,8 @@ class Renderer(renderer.Renderer): netrules_content = self._render_persistent_net(network_state) netrules_path = util.target_path(target, self.netrules_path) util.write_file(netrules_path, netrules_content, file_mode) + if available_nm(target=target): + enable_ifcfg_rh(util.target_path(target, path=NM_CFG_FILE)) sysconfig_path = util.target_path(target, templates.get('control')) # Distros configuring /etc/sysconfig/network as a file e.g. 
Centos @@ -671,6 +694,13 @@ class Renderer(renderer.Renderer): def available(target=None): + sysconfig = available_sysconfig(target=target) + nm = available_nm(target=target) + + return any([nm, sysconfig]) + + +def available_sysconfig(target=None): expected = ['ifup', 'ifdown'] search = ['/sbin', '/usr/sbin'] for p in expected: @@ -686,4 +716,10 @@ def available(target=None): return True +def available_nm(target=None): + if not os.path.isfile(util.target_path(target, path=NM_CFG_FILE)): + return False + return True + + # vi: ts=4 expandtab diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 195f261c..d679e92c 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -22,6 +22,7 @@ import os import textwrap import yaml + DHCP_CONTENT_1 = """ DEVICE='eth0' PROTO='dhcp' @@ -1880,6 +1881,7 @@ class TestRhelSysConfigRendering(CiTestCase): with_logs = True + nm_cfg_file = "/etc/NetworkManager/NetworkManager.conf" scripts_dir = '/etc/sysconfig/network-scripts' header = ('# Created by cloud-init on instance boot automatically, ' 'do not edit.\n#\n') @@ -2174,6 +2176,75 @@ USERCTL=no self._compare_files_to_expected(entry[self.expected_name], found) self._assert_headers(found) + def test_check_ifcfg_rh(self): + """ifcfg-rh plugin is added NetworkManager.conf if conf present.""" + render_dir = self.tmp_dir() + nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) + util.ensure_dir(os.path.dirname(nm_cfg)) + + # write a template nm.conf, note plugins is a list here + with open(nm_cfg, 'w') as fh: + fh.write('# test_check_ifcfg_rh\n[main]\nplugins=foo,bar\n') + self.assertTrue(os.path.exists(nm_cfg)) + + # render and read + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + # check ifcfg-rh is in the 'plugins' list + config = sysconfig.ConfigObj(nm_cfg) + self.assertIn('ifcfg-rh', config['main']['plugins']) + + def test_check_ifcfg_rh_plugins_string(self): + """ifcfg-rh plugin is append when plugins is a string.""" + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) + util.ensure_dir(os.path.dirname(nm_cfg)) + + # write a template nm.conf, note plugins is a value here + util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\nplugins=foo\n') + + # render and read + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), + dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + # check raw content has plugin + nm_file_content = util.load_file(nm_cfg) + self.assertIn('ifcfg-rh', nm_file_content) + + # check ifcfg-rh is in the 'plugins' list + config = sysconfig.ConfigObj(nm_cfg) + self.assertIn('ifcfg-rh', config['main']['plugins']) + + def test_check_ifcfg_rh_plugins_no_plugins(self): + """enable_ifcfg_plugin creates plugins value if missing.""" + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + nm_cfg = util.target_path(render_dir, path=self.nm_cfg_file) + util.ensure_dir(os.path.dirname(nm_cfg)) + + # write a template nm.conf, note plugins is missing + util.write_file(nm_cfg, '# test_check_ifcfg_rh\n[main]\n') + self.assertTrue(os.path.exists(nm_cfg)) + + # render and read + entry = NETWORK_CONFIGS['small'] + found = self._render_and_read(network_config=yaml.load(entry['yaml']), 
+ dir=render_dir) + self._compare_files_to_expected(entry[self.expected_name], found) + self._assert_headers(found) + + # check ifcfg-rh is in the 'plugins' list + config = sysconfig.ConfigObj(nm_cfg) + self.assertIn('ifcfg-rh', config['main']['plugins']) + class TestOpenSuseSysConfigRendering(CiTestCase): -- cgit v1.2.3 From b74ebca563a21332b29482c8029e7908f60225a4 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Wed, 23 Jan 2019 22:35:32 +0000 Subject: net/sysconfig: do not write a resolv.conf file with only the header. Writing the file with no dns information may prevent distro tools from writing a resolv.conf file with dns information obtained from a dhcp server. --- cloudinit/net/sysconfig.py | 5 ++++- tests/unittests/test_net.py | 8 ++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index ae41f7b3..fd8e5010 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -557,6 +557,8 @@ class Renderer(renderer.Renderer): content.add_nameserver(nameserver) for searchdomain in network_state.dns_searchdomains: content.add_search_domain(searchdomain) + if not str(content): + return None header = _make_header(';') content_str = str(content) if not content_str.startswith(header): @@ -666,7 +668,8 @@ class Renderer(renderer.Renderer): dns_path = util.target_path(target, self.dns_path) resolv_content = self._render_dns(network_state, existing_dns_path=dns_path) - util.write_file(dns_path, resolv_content, file_mode) + if resolv_content: + util.write_file(dns_path, resolv_content, file_mode) if self.networkmanager_conf_path: nm_conf_path = util.target_path(target, self.networkmanager_conf_path) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index d679e92c..5313d2df 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2098,6 +2098,10 @@ TYPE=Ethernet USERCTL=no """ self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + # The configuration has no nameserver information make sure we + # do not write the resolv.conf file + respath = '/etc/resolv.conf' + self.assertNotIn(respath, found.keys()) def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) @@ -2456,6 +2460,10 @@ TYPE=Ethernet USERCTL=no """ self.assertEqual(expected, found[nspath + 'ifcfg-interface0']) + # The configuration has no nameserver information make sure we + # do not write the resolv.conf file + respath = '/etc/resolv.conf' + self.assertNotIn(respath, found.keys()) def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) -- cgit v1.2.3 From c283321bb118d5408390e12b173440f57bd2c160 Mon Sep 17 00:00:00 2001 From: Johnson Shi Date: Fri, 25 Jan 2019 17:46:33 +0000 Subject: lxd: install zfs-linux instead of zfs meta package When using the LXD module cloud-init will attempt to install ZFS if it does not exist on the target system. However instead of installing the `zfsutils-linux` package it attempts to install `zfs` resulting in an error. Ubuntu Xenial (16.04) has zfs meta package, but Bionic (18.04) does not. Use the specific base package instead of zfs meta. 
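For reference, a self-contained sketch of the package-selection branch this commit changes; the point is only that the concrete zfsutils-linux package replaces the 'zfs' meta package name:

from shutil import which

def lxd_packages(init_cfg):
    packages = []
    if not which('lxd'):
        packages.append('lxd')
    if init_cfg.get('storage_backend') == 'zfs' and not which('zfs'):
        # 'zfs' is a meta package on Xenial only; Bionic needs the real one
        packages.append('zfsutils-linux')
    return packages

print(lxd_packages({'storage_backend': 'zfs'}))
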
Co-authored-by: Michael Skalka LP: #1799779 --- cloudinit/config/cc_lxd.py | 2 +- tests/unittests/test_handler/test_handler_lxd.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 24a8ebea..71d13ed8 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -89,7 +89,7 @@ def handle(name, cfg, cloud, log, args): packages.append('lxd') if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'): - packages.append('zfs') + packages.append('zfsutils-linux') if len(packages): try: diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index 2478ebc4..b63db616 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -62,7 +62,7 @@ class TestLxd(t_help.CiTestCase): cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) self.assertFalse(m_maybe_clean.called) install_pkg = cc.distro.install_packages.call_args_list[0][0][0] - self.assertEqual(sorted(install_pkg), ['lxd', 'zfs']) + self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux']) @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.util") -- cgit v1.2.3 From 7a4696596bbcccfedf5c6b6e25ad684ef30d9cea Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Fri, 25 Jan 2019 17:52:41 +0000 Subject: run-container: uncomment baseurl in yum.repos.d/*.repo when using a proxy When using a proxy it is often useful to know in advance which mirrors are to be contacted, so a whitelist can be set up. This is not easy when using the yum.conf(5) mirrorlist option, as the retrieved list of mirrors may change. The repository definition may also specify a canonical mirror with the 'baseurl' option; this option is often commented out by default to favor the usage of worldwide mirrors. This patch uncomments 'baseurl' when an http_proxy is being used, so the canonical mirror is used *in addition to* the mirrors retrieved from the mirrorlist. --- tools/run-container | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/run-container b/tools/run-container index 6dedb757..852f4d1e 100755 --- a/tools/run-container +++ b/tools/run-container @@ -373,6 +373,7 @@ wait_for_boot() { inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf" inside "$name" sed -i s/enabled=1/enabled=0/ \ /etc/yum/pluginconf.d/fastestmirror.conf + inside "$name" sh -c "sed -i '/^#baseurl=/s/#//' /etc/yum.repos.d/*.repo" else debug 1 "do not know how to configure proxy on $OS_NAME" fi -- cgit v1.2.3 From c7248059dd2faaaadfbcef5c83e8e8ea166d6767 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 25 Jan 2019 22:35:40 +0000 Subject: tox: fix disco httpretty dependencies for py37 LP: #1813361 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index d983348b..d3717200 100644 --- a/tox.ini +++ b/tox.ini @@ -75,7 +75,7 @@ deps = jsonpatch==1.16 six==1.10.0 # test-requirements - httpretty==0.8.6 + httpretty==0.9.6 mock==1.3.0 nose==1.3.7 unittest2==1.1.0 -- cgit v1.2.3 From d1a2fe7307e9cf2251d1f9a666c12d71d3f522d6 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Sat, 26 Jan 2019 15:06:42 +0000 Subject: opennebula: exclude EPOCHREALTIME as known bash env variable with a delta This branch is needed to allow cloud-init to sbuild on Ubuntu Disco. 
OpenNebula:parse_shell_config tries to do a comparison of bash environment values, excluding expected environment variables which are known to change. Bash on Ubuntu Disco surfaces a new EPOCHREALTIME environment variable which wasn't in previous bash environments, this var needs to be ignored by parse_shell_config too. LP: #1813383 --- cloudinit/sources/DataSourceOpenNebula.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index e62e9729..6e1d04bd 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -337,7 +337,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None, (output, _error) = util.subp(cmd, data=bcmd) # exclude vars in bash that change on their own or that we used - excluded = ("RANDOM", "LINENO", "SECONDS", "_", "__v") + excluded = ("EPOCHREALTIME", "RANDOM", "LINENO", "SECONDS", "_", "__v") preset = {} ret = {} target = None -- cgit v1.2.3 From 7a6ed1a23aeb26efaebc818a1a7cc7f7c6757b32 Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Mon, 28 Jan 2019 15:54:47 +0000 Subject: flake8: use ==/!= to compare str, bytes, and int literals --- cloudinit/config/cc_rh_subscription.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index edee01e5..28c79b83 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -249,14 +249,14 @@ class SubscriptionManager(object): except util.ProcessExecutionError as e: if e.stdout.rstrip() != '': for line in e.stdout.split("\n"): - if line is not '': + if line != '': self.log_warn(line) else: self.log_warn("Setting the service level failed with: " "{0}".format(e.stderr.strip())) return False for line in return_out.split("\n"): - if line is not "": + if line != "": self.log.debug(line) return True @@ -268,7 +268,7 @@ class SubscriptionManager(object): self.log_warn("Auto-attach failed with: {0}".format(e)) return False for line in return_out.split("\n"): - if line is not "": + if line != "": self.log.debug(line) return True -- cgit v1.2.3 From 3f12012eba2aabb6ca7b3ef70bc33a4aa1edada4 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Mon, 28 Jan 2019 17:06:58 +0000 Subject: sysconfig: On SUSE, use STARTMODE instead of ONBOOT ONBOOT is not recognized on openSUSE and SUSE Linux Enterprise, add the STARTMODE setting LP: #1799540 --- cloudinit/net/sysconfig.py | 2 ++ tests/unittests/test_distros/test_netconfig.py | 8 ++++++ tests/unittests/test_net.py | 40 ++++++++++++++++++++++++++ 3 files changed, 50 insertions(+) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index fd8e5010..19b3e60c 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -273,6 +273,7 @@ class Renderer(renderer.Renderer): ('USERCTL', False), ('NM_CONTROLLED', False), ('BOOTPROTO', 'none'), + ('STARTMODE', 'auto'), ]) # If these keys exist, then their values will be used to form @@ -367,6 +368,7 @@ class Renderer(renderer.Renderer): iface_cfg.name)) if subnet.get('control') == 'manual': iface_cfg['ONBOOT'] = False + iface_cfg['STARTMODE'] = 'manual' # set IPv4 and IPv6 static addresses ipv4_index = -1 diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 6e339355..e986b593 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ 
b/tests/unittests/test_distros/test_netconfig.py @@ -468,6 +468,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -476,6 +477,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -499,6 +501,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -507,6 +510,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -559,6 +563,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -567,6 +572,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -587,6 +593,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -595,6 +602,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): DEVICE=eth1 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 5313d2df..e041e978 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -145,6 +145,7 @@ IPADDR=172.19.1.34 NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -178,6 +179,7 @@ IPADDR=172.19.1.34 NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -247,6 +249,7 @@ NETMASK=255.255.252.0 NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -282,6 +285,7 @@ NETMASK=255.255.252.0 NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -373,6 +377,7 @@ IPV6_DEFAULTGW=2001:DB8::1 NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -410,6 +415,7 @@ IPV6_DEFAULTGW=2001:DB8::1 NETMASK=255.255.252.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip()), @@ -526,6 +532,7 @@ NETWORK_CONFIGS = { HWADDR=cf:d6:af:48:e8:80 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-eth99': textwrap.dedent("""\ @@ -542,6 +549,7 @@ NETWORK_CONFIGS = { METRIC=10000 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), }, @@ -655,6 +663,7 @@ NETWORK_CONFIGS = { NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no MTU=9000 @@ -694,6 +703,7 @@ NETWORK_CONFIGS = { DEVICE=iface0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -897,6 +907,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MACADDR=aa:bb:cc:dd:ee:ff NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Bond USERCTL=no"""), 'ifcfg-bond0.200': textwrap.dedent("""\ @@ -905,6 +916,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PHYSDEV=bond0 + STARTMODE=auto TYPE=Ethernet USERCTL=no VLAN=yes"""), @@ -922,6 +934,7 @@ pre-down route del -net 10.0.0.0 netmask 
255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PRIO=22 + STARTMODE=auto STP=no TYPE=Bridge USERCTL=no"""), @@ -931,6 +944,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=c0:d6:9f:2c:e8:80 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-eth0.101': textwrap.dedent("""\ @@ -949,6 +963,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PHYSDEV=eth0 + STARTMODE=auto TYPE=Ethernet USERCTL=no VLAN=yes"""), @@ -959,6 +974,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MASTER=bond0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto SLAVE=yes TYPE=Ethernet USERCTL=no"""), @@ -969,6 +985,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MASTER=bond0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto SLAVE=yes TYPE=Ethernet USERCTL=no"""), @@ -979,6 +996,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=66:bb:9f:2c:e8:80 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-eth4': textwrap.dedent("""\ @@ -988,6 +1006,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=98:bb:9f:2c:e8:80 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-eth5': textwrap.dedent("""\ @@ -996,6 +1015,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=98:bb:9f:2c:e8:8a NM_CONTROLLED=no ONBOOT=no + STARTMODE=manual TYPE=Ethernet USERCTL=no""") }, @@ -1307,6 +1327,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Bond USERCTL=no """), @@ -1318,6 +1339,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes SLAVE=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1334,6 +1356,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes SLAVE=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1359,6 +1382,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NETMASK1=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Bond USERCTL=no """), @@ -1370,6 +1394,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes SLAVE=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1392,6 +1417,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes SLAVE=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1429,6 +1455,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=aa:bb:cc:dd:e8:00 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no"""), 'ifcfg-en0.99': textwrap.dedent("""\ @@ -1447,6 +1474,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PHYSDEV=en0 + STARTMODE=auto TYPE=Ethernet USERCTL=no VLAN=yes"""), @@ -1488,6 +1516,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NM_CONTROLLED=no ONBOOT=yes PRIO=22 + STARTMODE=auto STP=no TYPE=Bridge USERCTL=no @@ -1501,6 +1530,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPV6INIT=yes NM_CONTROLLED=no ONBOOT=yes + 
STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1513,6 +1543,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true IPV6INIT=yes NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1587,6 +1618,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=no + STARTMODE=manual TYPE=Ethernet USERCTL=no """), @@ -1597,6 +1629,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true MTU=1480 NM_CONTROLLED=no ONBOOT=yes + STARTMODE=auto TYPE=Ethernet USERCTL=no """), @@ -1606,6 +1639,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true HWADDR=52:54:00:12:34:ff NM_CONTROLLED=no ONBOOT=no + STARTMODE=manual TYPE=Ethernet USERCTL=no """), @@ -1973,6 +2007,7 @@ DEVICE=eth1000 HWADDR=07-1C-C6-75-A4-BE NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip() @@ -2094,6 +2129,7 @@ IPADDR=10.0.2.15 NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """ @@ -2119,6 +2155,7 @@ BOOTPROTO=dhcp DEVICE=eth0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """ @@ -2335,6 +2372,7 @@ DEVICE=eth1000 HWADDR=07-1C-C6-75-A4-BE NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """.lstrip() @@ -2456,6 +2494,7 @@ IPADDR=10.0.2.15 NETMASK=255.255.255.0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """ @@ -2481,6 +2520,7 @@ BOOTPROTO=dhcp DEVICE=eth0 NM_CONTROLLED=no ONBOOT=yes +STARTMODE=auto TYPE=Ethernet USERCTL=no """ -- cgit v1.2.3 From 09dcecf37628c5809ae21d7785693cb7358ca94c Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Mon, 28 Jan 2019 17:51:57 +0000 Subject: systemd: Render generator from template to account for system differences. The systemd generator had a hard-coded path for the location of the target file to create. This path does not apply to all distributions. Render the generator from a template so that the path can be set at build time.
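A rough sketch of what the build-time rendering in setup.py below amounts to (render_generator is a hypothetical helper; './tools/render-cloudcfg' and the 0o755 mode mirror the patch, while the '--variant' flag is assumed to match that tool's interface):

    import os
    import subprocess
    import sys
    import tempfile

    def render_generator(template, variant=None, mode=0o755):
        # Render a jinja template such as systemd/cloud-init-generator.tmpl
        # into a temporary directory, as setup.py's render_tmpl() does.
        tmpd = tempfile.mkdtemp()
        fpath = os.path.join(
            tmpd, os.path.basename(template).replace('.tmpl', ''))
        cmd = [sys.executable, './tools/render-cloudcfg']
        if variant:
            cmd.extend(['--variant', variant])
        subprocess.check_call(cmd + [template, fpath])
        os.chmod(fpath, mode)  # systemd generators must be executable
        return fpath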
--- setup.py | 13 ++- systemd/cloud-init-generator | 170 ------------------------------------ systemd/cloud-init-generator.tmpl | 175 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 185 insertions(+), 173 deletions(-) delete mode 100755 systemd/cloud-init-generator create mode 100755 systemd/cloud-init-generator.tmpl diff --git a/setup.py b/setup.py index ea37efc3..186e215f 100755 --- a/setup.py +++ b/setup.py @@ -30,6 +30,8 @@ VARIANT = None def is_f(p): return os.path.isfile(p) +def is_generator(p): + return '-generator' in p def tiny_p(cmd, capture=True): # Darn python 2.6 doesn't have check_output (argggg) @@ -90,7 +92,7 @@ def read_requires(): return str(deps).splitlines() -def render_tmpl(template): +def render_tmpl(template, mode=None): """render template into a tmpdir under same dir as setup.py This is rendered to a temporary directory under the top level @@ -119,6 +121,8 @@ def render_tmpl(template): VARIANT, template, fpath]) else: tiny_p([sys.executable, './tools/render-cloudcfg', template, fpath]) + if mode: + os.chmod(fpath, mode) # return path relative to setup.py return os.path.join(os.path.basename(tmpd), bname) @@ -138,8 +142,11 @@ INITSYS_FILES = { 'systemd': [render_tmpl(f) for f in (glob('systemd/*.tmpl') + glob('systemd/*.service') + - glob('systemd/*.target')) if is_f(f)], - 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)], + glob('systemd/*.target')) + if (is_f(f) and not is_generator(f))], + 'systemd.generators': [ + render_tmpl(f, mode=0o755) + for f in glob('systemd/*') if is_f(f) and is_generator(f)], 'upstart': [f for f in glob('upstart/*') if is_f(f)], } INITSYS_ROOTS = { diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator deleted file mode 100755 index bd9f2678..00000000 --- a/systemd/cloud-init-generator +++ /dev/null @@ -1,170 +0,0 @@ -#!/bin/sh -set -f - -LOG="" -DEBUG_LEVEL=1 -LOG_D="/run/cloud-init" -ENABLE="enabled" -DISABLE="disabled" -FOUND="found" -NOTFOUND="notfound" -RUN_ENABLED_FILE="$LOG_D/$ENABLE" -CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" -CLOUD_TARGET_NAME="cloud-init.target" -# lxc sets 'container', but lets make that explicitly a global -CONTAINER="${container}" - -debug() { - local lvl="$1" - shift - [ "$lvl" -gt "$DEBUG_LEVEL" ] && return - if [ -z "$LOG" ]; then - local log="$LOG_D/${0##*/}.log" - { [ -d "$LOG_D" ] || mkdir -p "$LOG_D"; } && - { : > "$log"; } >/dev/null 2>&1 && LOG="$log" || - LOG="/dev/kmsg" - fi - echo "$@" >> "$LOG" -} - -etc_file() { - local pprefix="${1:-/etc/cloud/cloud-init.}" - _RET="unset" - [ -f "${pprefix}$ENABLE" ] && _RET="$ENABLE" && return 0 - [ -f "${pprefix}$DISABLE" ] && _RET="$DISABLE" && return 0 - return 0 -} - -read_proc_cmdline() { - # return /proc/cmdline for non-container, and /proc/1/cmdline for container - local ctname="systemd" - if [ -n "$CONTAINER" ] && ctname=$CONTAINER || - systemd-detect-virt --container --quiet; then - if { _RET=$(tr '\0' ' ' < /proc/1/cmdline); } 2>/dev/null; then - _RET_MSG="container[$ctname]: pid 1 cmdline" - return - fi - _RET="" - _RET_MSG="container[$ctname]: pid 1 cmdline not available" - return 0 - fi - - _RET_MSG="/proc/cmdline" - read _RET < /proc/cmdline -} - -kernel_cmdline() { - local cmdline="" tok="" - if [ -n "${KERNEL_CMDLINE+x}" ]; then - # use KERNEL_CMDLINE if present in environment even if empty - cmdline=${KERNEL_CMDLINE} - debug 1 "kernel command line from env KERNEL_CMDLINE: $cmdline" - elif read_proc_cmdline; then - read_proc_cmdline && cmdline="$_RET" - debug 1 "kernel 
command line ($_RET_MSG): $cmdline" - fi - _RET="unset" - cmdline=" $cmdline " - tok=${cmdline##* cloud-init=} - [ "$tok" = "$cmdline" ] && _RET="unset" - tok=${tok%% *} - [ "$tok" = "$ENABLE" -o "$tok" = "$DISABLE" ] && _RET="$tok" - return 0 -} - -default() { - _RET="$ENABLE" -} - -check_for_datasource() { - local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" - if [ ! -x "$dsidentify" ]; then - debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" - return 0 - fi - $dsidentify - ds_rc=$? - debug 1 "ds-identify rc=$ds_rc" - if [ "$ds_rc" = "0" ]; then - _RET="$FOUND" - debug 1 "ds-identify _RET=$_RET" - return 0 - fi - _RET="$NOTFOUND" - debug 1 "ds-identify _RET=$_RET" - return 1 -} - -main() { - local normal_d="$1" early_d="$2" late_d="$3" - local target_name="multi-user.target" gen_d="$early_d" - local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}" - local ds="$NOTFOUND" - - debug 1 "$0 normal=$normal_d early=$early_d late=$late_d" - debug 2 "$0 $*" - - local search result="error" ret="" - for search in kernel_cmdline etc_file default; do - if $search; then - debug 1 "$search found $_RET" - [ "$_RET" = "$ENABLE" -o "$_RET" = "$DISABLE" ] && - result=$_RET && break - else - ret=$? - debug 0 "search $search returned $ret" - fi - done - - # enable AND ds=found == enable - # enable AND ds=notfound == disable - # disable || == disabled - if [ "$result" = "$ENABLE" ]; then - debug 1 "checking for datasource" - check_for_datasource - ds=$_RET - if [ "$ds" = "$NOTFOUND" ]; then - debug 1 "cloud-init is enabled but no datasource found, disabling" - result="$DISABLE" - fi - fi - - if [ "$result" = "$ENABLE" ]; then - if [ -e "$link_path" ]; then - debug 1 "already enabled: no change needed" - else - [ -d "${link_path%/*}" ] || mkdir -p "${link_path%/*}" || - debug 0 "failed to make dir $link_path" - if ln -snf "$CLOUD_SYSTEM_TARGET" "$link_path"; then - debug 1 "enabled via $link_path -> $CLOUD_SYSTEM_TARGET" - else - ret=$? - debug 0 "[$ret] enable failed:" \ - "ln $CLOUD_SYSTEM_TARGET $link_path" - fi - fi - : > "$RUN_ENABLED_FILE" - elif [ "$result" = "$DISABLE" ]; then - if [ -f "$link_path" ]; then - if rm -f "$link_path"; then - debug 1 "disabled. removed existing $link_path" - else - ret=$? 
- debug 0 "[$ret] disable failed, remove $link_path" - fi - else - debug 1 "already disabled: no change needed [no $link_path]" - fi - if [ -e "$RUN_ENABLED_FILE" ]; then - rm -f "$RUN_ENABLED_FILE" - fi - else - debug 0 "unexpected result '$result' 'ds=$ds'" - ret=3 - fi - return $ret -} - -main "$@" - -# vi: ts=4 expandtab diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl new file mode 100755 index 00000000..cfa5eb53 --- /dev/null +++ b/systemd/cloud-init-generator.tmpl @@ -0,0 +1,175 @@ +## template:jinja +#!/bin/sh +set -f + +LOG="" +DEBUG_LEVEL=1 +LOG_D="/run/cloud-init" +ENABLE="enabled" +DISABLE="disabled" +FOUND="found" +NOTFOUND="notfound" +RUN_ENABLED_FILE="$LOG_D/$ENABLE" +{% if variant in ["suse"] %} +CLOUD_SYSTEM_TARGET="/usr/lib/systemd/system/cloud-init.target" +{% else %} +CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" +{% endif %} +CLOUD_TARGET_NAME="cloud-init.target" +# lxc sets 'container', but lets make that explicitly a global +CONTAINER="${container}" + +debug() { + local lvl="$1" + shift + [ "$lvl" -gt "$DEBUG_LEVEL" ] && return + if [ -z "$LOG" ]; then + local log="$LOG_D/${0##*/}.log" + { [ -d "$LOG_D" ] || mkdir -p "$LOG_D"; } && + { : > "$log"; } >/dev/null 2>&1 && LOG="$log" || + LOG="/dev/kmsg" + fi + echo "$@" >> "$LOG" +} + +etc_file() { + local pprefix="${1:-/etc/cloud/cloud-init.}" + _RET="unset" + [ -f "${pprefix}$ENABLE" ] && _RET="$ENABLE" && return 0 + [ -f "${pprefix}$DISABLE" ] && _RET="$DISABLE" && return 0 + return 0 +} + +read_proc_cmdline() { + # return /proc/cmdline for non-container, and /proc/1/cmdline for container + local ctname="systemd" + if [ -n "$CONTAINER" ] && ctname=$CONTAINER || + systemd-detect-virt --container --quiet; then + if { _RET=$(tr '\0' ' ' < /proc/1/cmdline); } 2>/dev/null; then + _RET_MSG="container[$ctname]: pid 1 cmdline" + return + fi + _RET="" + _RET_MSG="container[$ctname]: pid 1 cmdline not available" + return 0 + fi + + _RET_MSG="/proc/cmdline" + read _RET < /proc/cmdline +} + +kernel_cmdline() { + local cmdline="" tok="" + if [ -n "${KERNEL_CMDLINE+x}" ]; then + # use KERNEL_CMDLINE if present in environment even if empty + cmdline=${KERNEL_CMDLINE} + debug 1 "kernel command line from env KERNEL_CMDLINE: $cmdline" + elif read_proc_cmdline; then + read_proc_cmdline && cmdline="$_RET" + debug 1 "kernel command line ($_RET_MSG): $cmdline" + fi + _RET="unset" + cmdline=" $cmdline " + tok=${cmdline##* cloud-init=} + [ "$tok" = "$cmdline" ] && _RET="unset" + tok=${tok%% *} + [ "$tok" = "$ENABLE" -o "$tok" = "$DISABLE" ] && _RET="$tok" + return 0 +} + +default() { + _RET="$ENABLE" +} + +check_for_datasource() { + local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" + if [ ! -x "$dsidentify" ]; then + debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" + return 0 + fi + $dsidentify + ds_rc=$? 
+ debug 1 "ds-identify rc=$ds_rc" + if [ "$ds_rc" = "0" ]; then + _RET="$FOUND" + debug 1 "ds-identify _RET=$_RET" + return 0 + fi + _RET="$NOTFOUND" + debug 1 "ds-identify _RET=$_RET" + return 1 +} + +main() { + local normal_d="$1" early_d="$2" late_d="$3" + local target_name="multi-user.target" gen_d="$early_d" + local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}" + local ds="$NOTFOUND" + + debug 1 "$0 normal=$normal_d early=$early_d late=$late_d" + debug 2 "$0 $*" + + local search result="error" ret="" + for search in kernel_cmdline etc_file default; do + if $search; then + debug 1 "$search found $_RET" + [ "$_RET" = "$ENABLE" -o "$_RET" = "$DISABLE" ] && + result=$_RET && break + else + ret=$? + debug 0 "search $search returned $ret" + fi + done + + # enable AND ds=found == enable + # enable AND ds=notfound == disable + # disable || == disabled + if [ "$result" = "$ENABLE" ]; then + debug 1 "checking for datasource" + check_for_datasource + ds=$_RET + if [ "$ds" = "$NOTFOUND" ]; then + debug 1 "cloud-init is enabled but no datasource found, disabling" + result="$DISABLE" + fi + fi + + if [ "$result" = "$ENABLE" ]; then + if [ -e "$link_path" ]; then + debug 1 "already enabled: no change needed" + else + [ -d "${link_path%/*}" ] || mkdir -p "${link_path%/*}" || + debug 0 "failed to make dir $link_path" + if ln -snf "$CLOUD_SYSTEM_TARGET" "$link_path"; then + debug 1 "enabled via $link_path -> $CLOUD_SYSTEM_TARGET" + else + ret=$? + debug 0 "[$ret] enable failed:" \ + "ln $CLOUD_SYSTEM_TARGET $link_path" + fi + fi + : > "$RUN_ENABLED_FILE" + elif [ "$result" = "$DISABLE" ]; then + if [ -f "$link_path" ]; then + if rm -f "$link_path"; then + debug 1 "disabled. removed existing $link_path" + else + ret=$? + debug 0 "[$ret] disable failed, remove $link_path" + fi + else + debug 1 "already disabled: no change needed [no $link_path]" + fi + if [ -e "$RUN_ENABLED_FILE" ]; then + rm -f "$RUN_ENABLED_FILE" + fi + else + debug 0 "unexpected result '$result' 'ds=$ds'" + ret=3 + fi + return $ret +} + +main "$@" + +# vi: ts=4 expandtab -- cgit v1.2.3 From 8ee294d567c071fff1f9567e968ba73602308192 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 28 Jan 2019 20:07:03 +0000 Subject: opennebula: also exclude epochseconds from changed environment vars In addition to EPOCHREALTIME there is also an EPOCHSECONDS environment variable that OpenNebula needs to exclude as it is expected to change. This commit supplements the other exclusion in commit d1a2fe7307e9cf2251d1f9a666c12d71d3f522d6. Without this fix, unittests will intermittently fail if parse_shell_config is run across a timing boundary where the EPOCHSECONDS changes mid-test. 
LP: #1813641 --- cloudinit/sources/DataSourceOpenNebula.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 6e1d04bd..02c9a7b8 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -337,7 +337,9 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None, (output, _error) = util.subp(cmd, data=bcmd) # exclude vars in bash that change on their own or that we used - excluded = ("EPOCHREALTIME", "RANDOM", "LINENO", "SECONDS", "_", "__v") + excluded = ( + "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_", + "__v") preset = {} ret = {} target = None -- cgit v1.2.3 From 94a64529dccebd8fe8c7969370b8696e46023fbd Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Wed, 30 Jan 2019 15:38:56 +0000 Subject: Resolve flake8 comparison and pycodestyle over-ident issues Fixes: - flake8: use ==/!= to compare str, bytes, and int literals - pycodestyle: E117 over-indented --- cloudinit/config/schema.py | 2 +- cloudinit/handlers/upstart_job.py | 2 +- cloudinit/sources/DataSourceEc2.py | 2 +- cloudinit/stages.py | 4 ++-- cloudinit/url_helper.py | 2 +- tests/unittests/test_distros/test_netconfig.py | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 080a6d06..807c3eee 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -367,7 +367,7 @@ def handle_schema_args(name, args): if not args.annotate: error(str(e)) except RuntimeError as e: - error(str(e)) + error(str(e)) else: print("Valid cloud-config file {0}".format(args.config_file)) if args.doc: diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 83fb0724..003cad60 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -89,7 +89,7 @@ def _has_suitable_upstart(): util.subp(["dpkg", "--compare-versions", dpkg_ver, "ge", good]) return True except util.ProcessExecutionError as e: - if e.exit_code is 1: + if e.exit_code == 1: pass else: util.logexc(LOG, "dpkg --compare-versions failed [%s]", diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 9ccf2cdc..eb6f27b2 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -442,7 +442,7 @@ def identify_aws(data): if (data['uuid'].startswith('ec2') and (data['uuid_source'] == 'hypervisor' or data['uuid'] == data['serial'])): - return CloudNames.AWS + return CloudNames.AWS return None diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 8a064124..da7d349a 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -548,11 +548,11 @@ class Init(object): with events.ReportEventStack("consume-user-data", "reading and applying user-data", parent=self.reporter): - self._consume_userdata(frequency) + self._consume_userdata(frequency) with events.ReportEventStack("consume-vendor-data", "reading and applying vendor-data", parent=self.reporter): - self._consume_vendordata(frequency) + self._consume_vendordata(frequency) # Perform post-consumption adjustments so that # modules that run during the init stage reflect diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 396d69ae..0af0d9e3 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -521,7 +521,7 @@ class OauthUrlHelper(object): if extra_exception_cb: ret = extra_exception_cb(msg, exception) finally: - 
self.exception_cb(msg, exception) + self.exception_cb(msg, exception) return ret def _headers_cb(self, extra_headers_cb, url): diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index e986b593..e4530408 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -407,7 +407,7 @@ class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): self.assertEqual(0o644, get_mode(cfgpath, tmpd)) def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' + return '/etc/netplan/50-cloud-init.yaml' def test_apply_network_config_v1_to_netplan_ub(self): expected_cfgs = { -- cgit v1.2.3 From 489553547f3461bd8ff67d3099237694130eb714 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 31 Jan 2019 03:52:13 +0000 Subject: clean: cloud-init clean should not trace when run from within cloud_dir Avoid traceback when cloud-init clean is run from within /var/lib/cloud/ deleted dirs. LP: #1795508 --- cloudinit/cmd/clean.py | 26 +++++++++++++------------- cloudinit/cmd/tests/test_clean.py | 3 ++- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index de22f7f2..28ee7b84 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -5,12 +5,13 @@ """Define 'clean' utility and handler as part of cloud-init commandline.""" import argparse +import glob import os import sys from cloudinit.stages import Init from cloudinit.util import ( - ProcessExecutionError, chdir, del_dir, del_file, get_config_logfiles, + ProcessExecutionError, del_dir, del_file, get_config_logfiles, is_link, subp) @@ -61,18 +62,17 @@ def remove_artifacts(remove_logs, remove_seed=False): if not os.path.isdir(init.paths.cloud_dir): return 0 # Artifacts dir already cleaned - with chdir(init.paths.cloud_dir): - for path in os.listdir('.'): - if path == 'seed' and not remove_seed: - continue - try: - if os.path.isdir(path) and not is_link(path): - del_dir(path) - else: - del_file(path) - except OSError as e: - error('Could not remove {0}: {1}'.format(path, str(e))) - return 1 + for path in glob.glob('%s/*' % init.paths.cloud_dir): + if path == '%s/seed' % init.paths.cloud_dir and not remove_seed: + continue + try: + if os.path.isdir(path) and not is_link(path): + del_dir(path) + else: + del_file(path) + except OSError as e: + error('Could not remove {0}: {1}'.format(path, str(e))) + return 1 return 0 diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py index 5a3ec3bf..15c3294e 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/cloudinit/cmd/tests/test_clean.py @@ -136,7 +136,8 @@ class TestClean(CiTestCase): clean.remove_artifacts, remove_logs=False) self.assertEqual(1, retcode) self.assertEqual( - 'ERROR: Could not remove dir1: oops\n', m_stderr.getvalue()) + 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir, + m_stderr.getvalue()) def test_handle_clean_args_reboots(self): """handle_clean_args_reboots when reboot arg is provided.""" -- cgit v1.2.3 From 3a897fbd792adc1a0031364bd46d64831ca91228 Mon Sep 17 00:00:00 2001 From: Dominic Schlegel Date: Wed, 6 Feb 2019 21:22:38 +0000 Subject: correct grammar issue in instance metadata documentation LP: #1802188 --- doc/rtd/topics/instancedata.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 5d2dc948..231a008c 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst 
@@ -4,7 +4,7 @@ Instance Metadata ***************** -What is a instance data? +What is instance data? ======================== Instance data is the collection of all configuration data that cloud-init -- cgit v1.2.3 From e9bf4f23209fecab15ff63427655e95bfa0934a7 Mon Sep 17 00:00:00 2001 From: Dominic Schlegel Date: Thu, 7 Feb 2019 21:15:34 +0000 Subject: add PyCharm IDE .idea/ path to .gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 75565ed4..80c509ec 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,4 @@ prime stage *.snap *.cover +.idea/ -- cgit v1.2.3 From cf30836645473c62599e838ab48b2d31677fa584 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 7 Feb 2019 22:38:41 +0000 Subject: netplan: Don't render yaml aliases when dumping netplan Cloud-init rendered netplan with duplicate aliases if a network config included "global" nameserver/search values. Netplan users can read yaml files which do use aliases, but cloud-init did not render a single yaml dictionary; instead it combined yaml sections into a single document, which sometimes resulted in duplicate aliases being present. This branch introduces a yaml SafeDumper class which can set the 'ignore_aliases' attribute. This is not enabled by default but callers to util.yaml_dumps can pass a boolean to toggle this. The netplan renderer uses noalias=True and the resulting yaml output does not contain any aliases. LP: #1815051 --- cloudinit/net/netplan.py | 3 +- cloudinit/safeyaml.py | 7 + cloudinit/util.py | 17 ++- tests/unittests/test_net.py | 336 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 355 insertions(+), 8 deletions(-) diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 21517fda..e54a34e5 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -361,7 +361,8 @@ class Renderer(renderer.Renderer): if section: dump = util.yaml_dumps({name: section}, explicit_start=False, - explicit_end=False) + explicit_end=False, + noalias=True) txt = util.indent(dump, ' ' * 4) return [txt] return [] diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index 7bcf9dd3..3bd5e03d 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -17,6 +17,13 @@ _CustomSafeLoader.add_constructor( _CustomSafeLoader.construct_python_unicode) +class NoAliasSafeDumper(yaml.dumper.SafeDumper): + """A class which avoids constructing anchors/aliases on yaml dump""" + + def ignore_aliases(self, data): + return True + + def load(blob): return(yaml.load(blob, Loader=_CustomSafeLoader)) diff --git a/cloudinit/util.py b/cloudinit/util.py index a8a232b6..2be528a0 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1596,14 +1596,17 @@ def json_dumps(data): separators=(',', ': '), default=json_serialize_default) -def yaml_dumps(obj, explicit_start=True, explicit_end=True): +def yaml_dumps(obj, explicit_start=True, explicit_end=True, noalias=False): """Return data in nicely formatted yaml.""" - return yaml.safe_dump(obj, - line_break="\n", - indent=4, - explicit_start=explicit_start, - explicit_end=explicit_end, - default_flow_style=False) + + return yaml.dump(obj, + line_break="\n", + indent=4, + explicit_start=explicit_start, + explicit_end=explicit_end, + default_flow_style=False, + Dumper=(safeyaml.NoAliasSafeDumper + if noalias else yaml.dumper.Dumper)) def ensure_dir(path, mode=None): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 5313d2df..e041e978 100644 --- a/tests/unittests/test_net.py +++ 
b/tests/unittests/test_net.py @@ -19,6 +19,7 @@ import gzip import io import json import os +import re import textwrap import yaml @@ -103,6 +104,309 @@ STATIC_EXPECTED_1 = { 'address': '10.0.0.2'}], } +V1_NAMESERVER_ALIAS = """ +config: +- id: eno1 + mac_address: 08:94:ef:51:ae:e0 + mtu: 1500 + name: eno1 + subnets: + - type: manual + type: physical +- id: eno2 + mac_address: 08:94:ef:51:ae:e1 + mtu: 1500 + name: eno2 + subnets: + - type: manual + type: physical +- id: eno3 + mac_address: 08:94:ef:51:ae:de + mtu: 1500 + name: eno3 + subnets: + - type: manual + type: physical +- bond_interfaces: + - eno1 + - eno3 + id: bondM + mac_address: 08:94:ef:51:ae:e0 + mtu: 1500 + name: bondM + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - address: 10.101.10.47/23 + gateway: 10.101.11.254 + type: static + type: bond +- id: eno4 + mac_address: 08:94:ef:51:ae:df + mtu: 1500 + name: eno4 + subnets: + - type: manual + type: physical +- id: enp0s20f0u1u6 + mac_address: 0a:94:ef:51:a4:b9 + mtu: 1500 + name: enp0s20f0u1u6 + subnets: + - type: manual + type: physical +- id: enp216s0f0 + mac_address: 68:05:ca:81:7c:e8 + mtu: 9000 + name: enp216s0f0 + subnets: + - type: manual + type: physical +- id: enp216s0f1 + mac_address: 68:05:ca:81:7c:e9 + mtu: 9000 + name: enp216s0f1 + subnets: + - type: manual + type: physical +- id: enp47s0f0 + mac_address: 68:05:ca:64:d3:6c + mtu: 9000 + name: enp47s0f0 + subnets: + - type: manual + type: physical +- bond_interfaces: + - enp216s0f0 + - enp47s0f0 + id: bond0 + mac_address: 68:05:ca:64:d3:6c + mtu: 9000 + name: bond0 + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - type: manual + type: bond +- id: bond0.3502 + mtu: 9000 + name: bond0.3502 + subnets: + - address: 172.20.80.4/25 + type: static + type: vlan + vlan_id: 3502 + vlan_link: bond0 +- id: bond0.3503 + mtu: 9000 + name: bond0.3503 + subnets: + - address: 172.20.80.129/25 + type: static + type: vlan + vlan_id: 3503 + vlan_link: bond0 +- id: enp47s0f1 + mac_address: 68:05:ca:64:d3:6d + mtu: 9000 + name: enp47s0f1 + subnets: + - type: manual + type: physical +- bond_interfaces: + - enp216s0f1 + - enp47s0f1 + id: bond1 + mac_address: 68:05:ca:64:d3:6d + mtu: 9000 + name: bond1 + params: + bond-downdelay: 0 + bond-lacp-rate: fast + bond-miimon: 100 + bond-mode: 802.3ad + bond-updelay: 0 + bond-xmit-hash-policy: layer3+4 + subnets: + - address: 10.101.8.65/26 + routes: + - destination: 213.119.192.0/24 + gateway: 10.101.8.126 + metric: 0 + type: static + type: bond +- address: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + type: nameserver +version: 1 +""" + +NETPLAN_NO_ALIAS = """ +network: + version: 2 + ethernets: + eno1: + match: + macaddress: 08:94:ef:51:ae:e0 + mtu: 1500 + set-name: eno1 + eno2: + match: + macaddress: 08:94:ef:51:ae:e1 + mtu: 1500 + set-name: eno2 + eno3: + match: + macaddress: 08:94:ef:51:ae:de + mtu: 1500 + set-name: eno3 + eno4: + match: + macaddress: 08:94:ef:51:ae:df + mtu: 1500 + set-name: eno4 + enp0s20f0u1u6: + match: + macaddress: 0a:94:ef:51:a4:b9 + mtu: 1500 + set-name: enp0s20f0u1u6 + enp216s0f0: + match: + macaddress: 68:05:ca:81:7c:e8 + mtu: 9000 + set-name: enp216s0f0 + enp216s0f1: + match: + macaddress: 68:05:ca:81:7c:e9 + mtu: 9000 + set-name: enp216s0f1 + enp47s0f0: + match: + macaddress: 68:05:ca:64:d3:6c + 
mtu: 9000 + set-name: enp47s0f0 + enp47s0f1: + match: + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + set-name: enp47s0f1 + bonds: + bond0: + interfaces: + - enp216s0f0 + - enp47s0f0 + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + bond1: + addresses: + - 10.101.8.65/26 + interfaces: + - enp216s0f1 + - enp47s0f1 + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + routes: + - metric: 0 + to: 213.119.192.0/24 + via: 10.101.8.126 + bondM: + addresses: + - 10.101.10.47/23 + gateway4: 10.101.11.254 + interfaces: + - eno1 + - eno3 + macaddress: 08:94:ef:51:ae:e0 + mtu: 1500 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + parameters: + down-delay: 0 + lacp-rate: fast + mii-monitor-interval: 100 + mode: 802.3ad + transmit-hash-policy: layer3+4 + up-delay: 0 + vlans: + bond0.3502: + addresses: + - 172.20.80.4/25 + id: 3502 + link: bond0 + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas + bond0.3503: + addresses: + - 172.20.80.129/25 + id: 3503 + link: bond0 + mtu: 9000 + nameservers: + addresses: + - 10.101.10.1 + - 10.101.10.2 + - 10.101.10.3 + - 10.101.10.5 + search: + - foo.bar + - maas +""" + + # Examples (and expected outputs for various renderers). OS_SAMPLES = [ { @@ -3065,6 +3369,38 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def test_render_output_has_yaml_no_aliases(self): + entry = { + 'yaml': V1_NAMESERVER_ALIAS, + 'expected_netplan': NETPLAN_NO_ALIAS, + } + network_config = yaml.load(entry['yaml']) + ns = network_state.parse_net_config_data(network_config) + files = self._render_and_read(state=ns) + # check for alias + content = files['/etc/netplan/50-cloud-init.yaml'] + + # test load the yaml to ensure we don't render something not loadable + # this allows single aliases, but not duplicate ones + parsed = yaml.load(files['/etc/netplan/50-cloud-init.yaml']) + self.assertNotEqual(None, parsed) + + # now look for any alias, avoid rendering them entirely + # generate the first anchor string using the template + # as of this writing, looks like "&id001" + anchor = r'&' + yaml.serializer.Serializer.ANCHOR_TEMPLATE % 1 + found_alias = re.search(anchor, content, re.MULTILINE) + if found_alias: + msg = "Error at: %s\nContent:\n%s" % (found_alias, content) + raise ValueError('Found yaml alias in rendered netplan: ' + msg) + + print(entry['expected_netplan']) + print('-- expected ^ | v rendered --') + print(files['/etc/netplan/50-cloud-init.yaml']) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + class TestEniRoundTrip(CiTestCase): -- cgit v1.2.3 From fff37e7dc6849fd16db504b0d338fae20a7beb39 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 8 Feb 2019 22:08:47 +0000 Subject: netinfo: Adjust ifconfig output parsing for FreeBSD ipv6 entries FreeBSD ifconfig output for ipv6 addrs doesn't find scopeid values when present in the output and the pformat rendering assumes that an ipv6 address 
will have a 'scope6' entry in the netdev info dictionary. This patch finds the scopeid value, which is not always inside <>, and in some cases v6 addrs don't have a scopeid value in the output, so when rendering the table, allow scope6 value to be replaced with the empty value. LP: #1779672 --- cloudinit/netinfo.py | 7 +++++-- cloudinit/tests/test_netinfo.py | 14 ++++++++++++++ tests/data/netinfo/freebsd-ifconfig-output | 17 +++++++++++++++++ tests/data/netinfo/freebsd-netdev-formatted-output | 11 +++++++++++ 4 files changed, 47 insertions(+), 2 deletions(-) create mode 100644 tests/data/netinfo/freebsd-ifconfig-output create mode 100644 tests/data/netinfo/freebsd-netdev-formatted-output diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 9ff929c2..e91cd263 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -141,6 +141,9 @@ def _netdev_info_ifconfig(ifconfig_data): res = re.match(r'.*<(\S+)>', toks[i + 1]) if res: devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) + else: + devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1] + return devs @@ -389,8 +392,8 @@ def netdev_pformat(): addr.get('scope', empty), data["hwaddr"])) for addr in data.get('ipv6'): tbl.add_row( - (dev, data["up"], addr["ip"], empty, addr["scope6"], - data["hwaddr"])) + (dev, data["up"], addr["ip"], empty, + addr.get("scope6", empty), data["hwaddr"])) if len(data.get('ipv6')) + len(data.get('ipv4')) == 0: tbl.add_row((dev, data["up"], empty, empty, empty, data["hwaddr"])) diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py index d76e768e..1c8a791e 100644 --- a/cloudinit/tests/test_netinfo.py +++ b/cloudinit/tests/test_netinfo.py @@ -11,6 +11,7 @@ from cloudinit.tests.helpers import CiTestCase, mock, readResource # Example ifconfig and route output SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") +SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") @@ -18,6 +19,7 @@ SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") +FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output") class TestNetInfo(CiTestCase): @@ -43,6 +45,18 @@ class TestNetInfo(CiTestCase): content = netdev_pformat() self.assertEqual(NETDEV_FORMATTED_OUT, content) + @mock.patch('cloudinit.netinfo.util.which') + @mock.patch('cloudinit.netinfo.util.subp') + def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + print() + print(content) + print() + self.assertEqual(FREEBSD_NETDEV_OUT, content) + @mock.patch('cloudinit.netinfo.util.which') @mock.patch('cloudinit.netinfo.util.subp') def test_netdev_iproute_pformat(self, m_subp, m_which): diff --git a/tests/data/netinfo/freebsd-ifconfig-output b/tests/data/netinfo/freebsd-ifconfig-output new file mode 100644 index 00000000..3de15a5a --- /dev/null +++ 
b/tests/data/netinfo/freebsd-ifconfig-output @@ -0,0 +1,17 @@ +vtnet0: flags=8843 metric 0 mtu 1500 + options=6c07bb + ether fa:16:3e:14:1f:99 + hwaddr fa:16:3e:14:1f:99 + inet 10.1.80.61 netmask 0xfffff000 broadcast 10.1.95.255 + nd6 options=29 + media: Ethernet 10Gbase-T + status: active +pflog0: flags=0<> metric 0 mtu 33160 +pfsync0: flags=0<> metric 0 mtu 1500 + syncpeer: 0.0.0.0 maxupd: 128 defer: off +lo0: flags=8049 metric 0 mtu 16384 + options=600003 + inet6 ::1 prefixlen 128 + inet6 fe80::1%lo0 prefixlen 64 scopeid 0x4 + inet 127.0.0.1 netmask 0xff000000 + nd6 options=21 diff --git a/tests/data/netinfo/freebsd-netdev-formatted-output b/tests/data/netinfo/freebsd-netdev-formatted-output new file mode 100644 index 00000000..a9d2ac14 --- /dev/null +++ b/tests/data/netinfo/freebsd-netdev-formatted-output @@ -0,0 +1,11 @@ ++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++++++++++++++ ++---------+-------+----------------+------------+-------+-------------------+ +| Device | Up | Address | Mask | Scope | Hw-Address | ++---------+-------+----------------+------------+-------+-------------------+ +| lo0 | True | 127.0.0.1 | 0xff000000 | . | . | +| lo0 | True | ::1/128 | . | . | . | +| lo0 | True | fe80::1%lo0/64 | . | 0x4 | . | +| pflog0 | False | . | . | . | . | +| pfsync0 | False | . | . | . | . | +| vtnet0 | True | 10.1.80.61 | 0xfffff000 | . | fa:16:3e:14:1f:99 | ++---------+-------+----------------+------------+-------+-------------------+ -- cgit v1.2.3 From 0bb4c74e7f2d008b015d5453a1be88ae807b1f9b Mon Sep 17 00:00:00 2001 From: "Guilherme G. Piccoli" Date: Thu, 14 Feb 2019 20:37:32 +0000 Subject: EC2: Rewrite network config on AWS Classic instances every boot AWS EC2 instances' network come in 2 basic flavors: Classic and VPC (Virtual Private Cloud). The former has an interesting behavior of having its MAC address changed whenever the instance is stopped/restarted. This behavior is not observed in VPC instances. In Ubuntu 18.04 (Bionic) the network "management" changed from ENI-style (etc/network/interfaces) to netplan, and when using netplan we observe the following block present in /etc/netplan/50-cloud-init.yaml: match: macaddress: aa:bb:cc:dd:ee:ff Jani Ollikainen noticed in Launchpad bug #1802073 that the EC2 Classic instances were booting without network access in Bionic after stop/restart procedure, due to their MAC address change behavior. It was narrowed down to the netplan MAC match block, that kept the old MAC address after stopping and restarting an instance, since the network configuration writing happens by default only once in EC2 instances, in the first boot. This patch changes the network configuration write to every boot in EC2 Classic instances, by checking against the "vpc-id" metadata information provided only in the VPC instances - if we don't have this metadata value, cloud-init will rewrite the network configuration file in every boot. This was tested in an EC2 Classic instance and proved to fix the issue; unit tests were also added for the new method is_classic_instance(). LP: #1802073 Reported-by: Jani Ollikainen Suggested-by: Ryan Harper Co-developed-by: Chad Smith Signed-off-by: Guilherme G. 
Piccoli --- cloudinit/sources/DataSourceEc2.py | 21 +++++++++++++++++++++ doc/rtd/topics/datasources/ec2.rst | 11 +++++++++++ tests/unittests/test_datasource/test_ec2.py | 24 ++++++++++++++++++++++++ 3 files changed, 56 insertions(+) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index eb6f27b2..4f2f6ccb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -19,6 +19,7 @@ from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util from cloudinit import warnings +from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -107,6 +108,19 @@ class DataSourceEc2(sources.DataSource): 'dynamic', {}).get('instance-identity', {}).get('document', {}) return True + def is_classic_instance(self): + """Report if this instance type is Ec2 Classic (non-vpc).""" + if not self.metadata: + # Can return False on inconclusive as we are also called in + # network_config where metadata will be present. + # Secondary call site is in packaging postinst script. + return False + ifaces_md = self.metadata.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + return False + return True + @property def launch_index(self): if not self.metadata: @@ -320,6 +334,13 @@ class DataSourceEc2(sources.DataSource): if isinstance(net_md, dict): result = convert_ec2_metadata_network_config( net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) + # RELEASE_BLOCKER: Xenial debian/postinst needs to add + # EventType.BOOT on upgrade path for classic. + + # Non-VPC (aka Classic) Ec2 instances need to rewrite the + # network config file every boot due to MAC address change. + if self.is_classic_instance(): + self.update_events['network'].add(EventType.BOOT) else: LOG.warning("Metadata 'network' key not valid: %s.", net_md) self._network_config = result diff --git a/doc/rtd/topics/datasources/ec2.rst b/doc/rtd/topics/datasources/ec2.rst index 64c325d8..76beca92 100644 --- a/doc/rtd/topics/datasources/ec2.rst +++ b/doc/rtd/topics/datasources/ec2.rst @@ -90,4 +90,15 @@ An example configuration with the default values is provided below: max_wait: 120 timeout: 50 +Notes +----- + * There are 2 types of EC2 instances network-wise: VPC ones (Virtual Private + Cloud) and Classic ones (also known as non-VPC). One major difference + between them is that Classic instances have their MAC address changed on + stop/restart operations, so cloud-init will recreate the network config + file for EC2 Classic instances every boot. On VPC instances this file is + generated only in the first boot of the instance. + The check for the instance type is performed by is_classic_instance() + method. + .. 
vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1a5956d9..20d59bfd 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -401,6 +401,30 @@ class TestEc2(test_helpers.HttprettyTestCase): ds.metadata = DEFAULT_METADATA self.assertEqual('my-identity-id', ds.get_instance_id()) + def test_classic_instance_true(self): + """If no vpc-id in metadata, is_classic_instance must return true.""" + md_copy = copy.deepcopy(DEFAULT_METADATA) + ifaces_md = md_copy.get('network', {}).get('interfaces', {}) + for _mac, mac_data in ifaces_md.get('macs', {}).items(): + if 'vpc-id' in mac_data: + del mac_data['vpc-id'] + + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': md_copy}) + self.assertTrue(ds.get_data()) + self.assertTrue(ds.is_classic_instance()) + + def test_classic_instance_false(self): + """If vpc-id in metadata, is_classic_instance must return false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md={'md': DEFAULT_METADATA}) + self.assertTrue(ds.get_data()) + self.assertFalse(ds.is_classic_instance()) + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') def test_valid_platform_with_strict_true(self, m_dhcp): """Valid platform data should return true with strict_id true.""" -- cgit v1.2.3 From 9cf9d8cdd3a8fd7d4d425f7051122d0ac8af2bbd Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 18 Feb 2019 22:55:49 +0000 Subject: cloud-init-per: normalize dashes in the name to underscores when building semaphore paths. This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676 --- tools/cloud-init-per | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/cloud-init-per b/tools/cloud-init-per index 7d6754b6..eae3e93f 100755 --- a/tools/cloud-init-per +++ b/tools/cloud-init-per @@ -38,7 +38,7 @@ fi [ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; } [ $# -ge 3 ] || { Usage 1>&2; exit 1; } freq=$1 -name=$2 +name=${2/-/_} shift 2; [ "${name#*/}" = "${name}" ] || fail "name cannot contain a /" @@ -53,6 +53,12 @@ esac [ -d "${sem%/*}" ] || mkdir -p "${sem%/*}" || fail "failed to make directory for ${sem}" +# Rename legacy sem files with dashes in their names. Do not overwrite existing +# sem files to prevent clobbering those which may have been created from calls +# outside of cloud-init. +sem_legacy="${sem/_/-}" +[ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem" + [ "$freq" != "always" -a -e "$sem" ] && exit 0 "$@" ret=$? -- cgit v1.2.3 From e7a8f81e6eee390ce6920df053bf7467b5e4dbd7 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 22 Feb 2019 09:41:28 +0000 Subject: tests: integration test failure summary to use traceback if empty error When integration tests verification fails, the object returned contains 'error' and 'traceback' keys. Each key can contain an empty string. If the simplified 'error' message is empty, fall back and use the more verbose full 'traceback' text in the failure summary.
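A short sketch of the new fallback (the failure record here is hypothetical, but uses the same keys the verify code consumes):

    # One entry of test_result['failures'] as handled below:
    failure = {'module': 'tests.modules', 'class': 'TestPackage',
               'function': 'test_installed', 'error': '',
               'traceback': 'Traceback (most recent call last): ...'}
    msg = '  * {module}.{class}.{function}\n    '.format(**failure)
    if failure.get('error'):
        msg += failure['error']
    else:
        msg += failure.get('traceback', '')
    # With an empty 'error', msg now ends with the full traceback text.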
--- tests/cloud_tests/verify.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py index 9911ecf2..7018f4d5 100644 --- a/tests/cloud_tests/verify.py +++ b/tests/cloud_tests/verify.py @@ -61,12 +61,17 @@ def format_test_failures(test_result): if not test_result['failures']: return '' failure_hdr = ' test failures:' - failure_fmt = ' * {module}.{class}.{function}\n {error}' + failure_fmt = ' * {module}.{class}.{function}\n ' output = [] for failure in test_result['failures']: if not output: output = [failure_hdr] - output.append(failure_fmt.format(**failure)) + msg = failure_fmt.format(**failure) + if failure.get('error'): + msg += failure['error'] + else: + msg += failure.get('traceback', '') + output.append(msg) return '\n'.join(output) -- cgit v1.2.3 From 79d40e6b7bce33af69572c6054b3398b8d8077c7 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 22 Feb 2019 09:56:03 +0000 Subject: doc: update merging doc with fixes and some additional details/examples Update config merging documentation with cloud-config syntax fix. Add an example showing how to merge two files with runcmd. --- doc/rtd/topics/merging.rst | 90 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 84 insertions(+), 6 deletions(-) diff --git a/doc/rtd/topics/merging.rst b/doc/rtd/topics/merging.rst index c75ca59c..5f7ca18d 100644 --- a/doc/rtd/topics/merging.rst +++ b/doc/rtd/topics/merging.rst @@ -21,12 +21,12 @@ For example. .. code-block:: yaml #cloud-config (1) - run_cmd: + runcmd: - bash1 - bash2 #cloud-config (2) - run_cmd: + runcmd: - bash3 - bash4 @@ -36,7 +36,7 @@ cloud-config object that contains the following. .. code-block:: yaml #cloud-config (merged) - run_cmd: + runcmd: - bash3 - bash4 @@ -45,7 +45,7 @@ Typically this is not what users want; instead they would likely prefer: .. code-block:: yaml #cloud-config (merged) - run_cmd: + runcmd: - bash1 - bash2 - bash3 @@ -55,6 +55,45 @@ This way makes it easier to combine the various cloud-config objects you have into a more useful list, thus reducing duplication necessary to accomplish the same result with the previous method. + +Built-in Mergers +================ + +Cloud-init provides merging for the following built-in types: + +- Dict +- List +- String + +The ``Dict`` merger has the following options which control what is done with +values contained within the config. + +- ``allow_delete``: Existing values not present in the new value can be deleted, defaults to False +- ``no_replace``: Do not replace an existing value if one is already present, enabled by default. +- ``replace``: Overwrite existing values with new ones. + +The ``List`` merger has the following options which control what is done with +the values contained within the config. + +- ``append``: Add new value to the end of the list, defaults to False. +- ``prepend``: Add new values to the start of the list, defaults to False. +- ``no_replace``: Do not replace an existing value if one is already present, enabled by default. +- ``replace``: Overwrite existing values with new ones. + +The ``Str`` merger has the following options which control what is done with +the values contained within the config. + +- ``append``: Add new value to the end of the string, defaults to False. + +Common options for all merge types which control how recursive merging is +done on other types. + +- ``recurse_dict``: If True merge the new values of the dictionary, defaults to True. 
+- ``recurse_list``: If True merge the new values of the list, defaults to False. +- ``recurse_array``: Alias for ``recurse_list``. +- ``recurse_str``: If True merge the new values of the string, defaults to False. + + Customizability =============== @@ -164,8 +203,8 @@ string format (i.e. the second option above), for example: .. code-block:: python - {'merge_how': [{'name': 'list', 'settings': ['extend']}, - {'name': 'dict', 'settings': []}, + {'merge_how': [{'name': 'list', 'settings': ['append']}, + {'name': 'dict', 'settings': ['no_replace', 'recurse_list']}, {'name': 'str', 'settings': ['append']}]} This would be the equivalent format for default string format but in dictionary @@ -201,4 +240,43 @@ Note, however, that merge algorithms are not used *across* types of configuration. As was the case before merging was implemented, user-data will overwrite conf.d configuration without merging. +Example cloud-config +==================== + +A common request is to include multiple ``runcmd`` directives in different +files and merge all of the commands together. To achieve this, we must modify +the default merging to allow for dictionaries to join list values. + + +The first config + +.. code-block:: yaml + + #cloud-config + merge_how: + - name: list + settings: [append] + - name: dict + settings: [no_replace, recurse_list] + + runcmd: + - bash1 + - bash2 + +The second config + +.. code-block:: yaml + + #cloud-config + merge_how: + - name: list + settings: [append] + - name: dict + settings: [no_replace, recurse_list] + + runcmd: + - bash3 + - bash4 + + .. vi: textwidth=78 -- cgit v1.2.3 From 34f54360fcc1e0f805002a0b639d0a84eb2cb8ee Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 22 Feb 2019 13:26:31 +0000 Subject: azure: Filter list of ssh keys pulled from fabric The Azure data source is expected to expose a list of ssh keys for the user-to-be-provisioned in the crawled metadata. When configured to use the __builtin__ agent this list is built by the WALinuxAgentShim. The shim retrieves the full set of certificates and public keys exposed to the VM from the wireserver, extracts any ssh keys it can, and returns that list. This fix reduces that list of ssh keys to just the ones whose fingerprints appear in the "administrative user" section of the ovf-env.xml file. The Azure control plane exposes other ssh keys to the VM for other reasons, but those should not be added to the authorized_keys file for the provisioned user. 
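The selection rule can be sketched as follows (the data values are hypothetical; the fingerprint is borrowed from the test data added below):

    # Mapping derived from the wireserver certificates
    # (see OpenSSLManager.parse_certificates):
    keys_by_fingerprint = {
        '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473': 'ssh-rsa AAAA... user',
    }
    # Public key entries for the administrative user from ovf-env.xml:
    pubkey_info = [{'fingerprint': '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473',
                    'path': '/home/user/.ssh/authorized_keys', 'value': ''}]

    keys = []
    for pubkey in pubkey_info:
        if pubkey.get('value'):  # key material may be supplied inline
            keys.append(pubkey['value'])
        elif pubkey.get('fingerprint') in keys_by_fingerprint:
            keys.append(keys_by_fingerprint[pubkey['fingerprint']])
    # keys == ['ssh-rsa AAAA... user']; all other certs exposed by the
    # wireserver are ignored.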
--- cloudinit/sources/DataSourceAzure.py | 13 +- cloudinit/sources/helpers/azure.py | 109 ++++++++++----- tests/data/azure/parse_certificates_fingerprints | 4 + tests/data/azure/parse_certificates_pem | 152 +++++++++++++++++++++ tests/data/azure/pubkey_extract_cert | 13 ++ tests/data/azure/pubkey_extract_ssh_key | 1 + .../unittests/test_datasource/test_azure_helper.py | 71 +++++++++- 7 files changed, 322 insertions(+), 41 deletions(-) create mode 100644 tests/data/azure/parse_certificates_fingerprints create mode 100644 tests/data/azure/parse_certificates_pem create mode 100644 tests/data/azure/pubkey_extract_cert create mode 100644 tests/data/azure/pubkey_extract_ssh_key diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a4f998b3..eccbee5a 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -627,9 +627,11 @@ class DataSourceAzure(sources.DataSource): if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: self.bounce_network_with_azure_hostname() + pubkey_info = self.cfg.get('_pubkeys', None) metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. - dhclient_lease_file) + dhclient_lease_file, + pubkey_info=pubkey_info) else: metadata_func = self.get_metadata_from_agent @@ -642,6 +644,7 @@ class DataSourceAzure(sources.DataSource): "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) return False + util.del_file(REPORTED_READY_MARKER_FILE) util.del_file(REPROVISION_MARKER_FILE) return fabric_data @@ -909,13 +912,15 @@ def find_child(node, filter_func): def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. - # [{'fingerprint': '6BE7A7C3C8A8F4B123CCA5D0C2F1BE4CA7B63ED7', - # 'path': '/where/to/go'}] # # <SSH><PublicKeys> - # <PublicKey><Fingerprint>ABC</Fingerprint><Path>/ABC</Path> + # <PublicKey><Fingerprint>ABC</Fingerprint><Path>/x/y/z</Path> # ... # </PublicKeys></SSH> + # Under some circumstances, there may be a <Value> element along with the + # Fingerprint and Path. Pass those along if they appear. 
results = find_child(sshnode, lambda n: n.localName == "PublicKeys") if len(results) == 0: return [] diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index e5696b1f..2829dd20 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -138,9 +138,36 @@ class OpenSSLManager(object): self.certificate = certificate LOG.debug('New certificate generated.') - def parse_certificates(self, certificates_xml): - tag = ElementTree.fromstring(certificates_xml).find( - './/Data') + @staticmethod + def _run_x509_action(action, cert): + cmd = ['openssl', 'x509', '-noout', action] + result, _ = util.subp(cmd, data=cert) + return result + + def _get_ssh_key_from_cert(self, certificate): + pub_key = self._run_x509_action('-pubkey', certificate) + keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] + ssh_key, _ = util.subp(keygen_cmd, data=pub_key) + return ssh_key + + def _get_fingerprint_from_cert(self, certificate): + """openssl x509 formats fingerprints as so: + 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ + B6:A8:BF:27:D4:73\n' + + Azure control plane passes that fingerprint as so: + '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + """ + raw_fp = self._run_x509_action('-fingerprint', certificate) + eq = raw_fp.find('=') + octets = raw_fp[eq+1:-1].split(':') + return ''.join(octets) + + def _decrypt_certs_from_xml(self, certificates_xml): + """Decrypt the certificates XML document using the our private key; + return the list of certs and private keys contained in the doc. + """ + tag = ElementTree.fromstring(certificates_xml).find('.//Data') certificates_content = tag.text lines = [ b'MIME-Version: 1.0', @@ -151,32 +178,30 @@ class OpenSSLManager(object): certificates_content.encode('utf-8'), ] with cd(self.tmpdir): - with open('Certificates.p7m', 'wb') as f: - f.write(b'\n'.join(lines)) out, _ = util.subp( - 'openssl cms -decrypt -in Certificates.p7m -inkey' + 'openssl cms -decrypt -in /dev/stdin -inkey' ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' ' -password pass:'.format(**self.certificate_names), - shell=True) - private_keys, certificates = [], [] + shell=True, data=b'\n'.join(lines)) + return out + + def parse_certificates(self, certificates_xml): + """Given the Certificates XML document, return a dictionary of + fingerprints and associated SSH keys derived from the certs.""" + out = self._decrypt_certs_from_xml(certificates_xml) current = [] + keys = {} for line in out.splitlines(): current.append(line) if re.match(r'[-]+END .*?KEY[-]+$', line): - private_keys.append('\n'.join(current)) + # ignore private_keys current = [] elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): - certificates.append('\n'.join(current)) + certificate = '\n'.join(current) + ssh_key = self._get_ssh_key_from_cert(certificate) + fingerprint = self._get_fingerprint_from_cert(certificate) + keys[fingerprint] = ssh_key current = [] - keys = [] - for certificate in certificates: - with cd(self.tmpdir): - public_key, _ = util.subp( - 'openssl x509 -noout -pubkey |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin', - data=certificate, - shell=True) - keys.append(public_key) return keys @@ -206,7 +231,6 @@ class WALinuxAgentShim(object): self.dhcpoptions = dhcp_options self._endpoint = None self.openssl_manager = None - self.values = {} self.lease_file = fallback_lease_file def clean_up(self): @@ -328,8 +352,9 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return 
endpoint_ip_address - def register_with_azure_and_fetch_data(self): - self.openssl_manager = OpenSSLManager() + def register_with_azure_and_fetch_data(self, pubkey_info=None): + if self.openssl_manager is None: + self.openssl_manager = OpenSSLManager() http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') attempts = 0 @@ -347,16 +372,37 @@ class WALinuxAgentShim(object): attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) - public_keys = [] - if goal_state.certificates_xml is not None: + ssh_keys = [] + if goal_state.certificates_xml is not None and pubkey_info is not None: LOG.debug('Certificate XML found; parsing out public keys.') - public_keys = self.openssl_manager.parse_certificates( + keys_by_fingerprint = self.openssl_manager.parse_certificates( goal_state.certificates_xml) - data = { - 'public-keys': public_keys, - } + ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info) self._report_ready(goal_state, http_client) - return data + return {'public-keys': ssh_keys} + + def _filter_pubkeys(self, keys_by_fingerprint, pubkey_info): + """cloud-init expects a straightforward array of keys to be dropped + into the user's authorized_keys file. Azure control plane exposes + multiple public keys to the VM via wireserver. Select just the + user's key(s) and return them, ignoring any other certs. + """ + keys = [] + for pubkey in pubkey_info: + if 'value' in pubkey and pubkey['value']: + keys.append(pubkey['value']) + elif 'fingerprint' in pubkey and pubkey['fingerprint']: + fingerprint = pubkey['fingerprint'] + if fingerprint in keys_by_fingerprint: + keys.append(keys_by_fingerprint[fingerprint]) + else: + LOG.warning("ovf-env.xml specified PublicKey fingerprint " + "%s not found in goalstate XML", fingerprint) + else: + LOG.warning("ovf-env.xml specified PublicKey with neither " + "value nor fingerprint: %s", pubkey) + + return keys def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') @@ -373,11 +419,12 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') -def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None): +def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, + pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts) try: - return shim.register_with_azure_and_fetch_data() + return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) finally: shim.clean_up() diff --git a/tests/data/azure/parse_certificates_fingerprints b/tests/data/azure/parse_certificates_fingerprints new file mode 100644 index 00000000..f7293c56 --- /dev/null +++ b/tests/data/azure/parse_certificates_fingerprints @@ -0,0 +1,4 @@ +ECEDEB3B8488D31AF3BC4CCED493F64B7D27D7B1 +073E19D14D1C799224C6A0FD8DDAB6A8BF27D473 +4C16E7FAD6297D74A9B25EB8F0A12808CEBE293E +929130695289B450FE45DCD5F6EF0CDE69865867 diff --git a/tests/data/azure/parse_certificates_pem b/tests/data/azure/parse_certificates_pem new file mode 100644 index 00000000..3521ea3a --- /dev/null +++ b/tests/data/azure/parse_certificates_pem @@ -0,0 +1,152 @@ +Bag Attributes + localKeyID: 01 00 00 00 + Microsoft CSP Name: Microsoft Enhanced Cryptographic Provider v1.0 +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDlEe5fUqwdrQTP +W2oVlGK2f31q/8ULT8KmOTyUvL0RPdJQ69vvHOc5Q2CKg2eviHC2LWhF8WmpnZj6 
+61RL0GeFGizwvU8Moebw5p3oqdcgoGpHVtxf+mr4QcWF58/Fwez0dA4hcsimVNBz +eNpBBUIKNBMTBG+4d6hcQBUAGKUdGRcCGEyTqXLU0MgHjxC9JgVqWJl+X2LcAGj5 +7J+tGYGTLzKJmeCeGVNN5ZtJ0T85MYHCKQk1/FElK+Kq5akovXffQHjlnCPcx0NJ +47NBjlPaFp2gjnAChn79bT4iCjOFZ9avWpqRpeU517UCnY7djOr3fuod/MSQyh3L +Wuem1tWBAgMBAAECggEBAM4ZXQRs6Kjmo95BHGiAEnSqrlgX+dycjcBq3QPh8KZT +nifqnf48XhnackENy7tWIjr3DctoUq4mOp8AHt77ijhqfaa4XSg7fwKeK9NLBGC5 +lAXNtAey0o2894/sKrd+LMkgphoYIUnuI4LRaGV56potkj/ZDP/GwTcG/R4SDnTn +C1Nb05PNTAPQtPZrgPo7TdM6gGsTnFbVrYHQLyg2Sq/osHfF15YohB01esRLCAwb +EF8JkRC4hWIZoV7BsyQ39232zAJQGGla7+wKFs3kObwh3VnFkQpT94KZnNiZuEfG +x5pW4Pn3gXgNsftscXsaNe/M9mYZqo//Qw7NvUIvAvECgYEA9AVveyK0HOA06fhh ++3hUWdvw7Pbrl+e06jO9+bT1RjQMbHKyI60DZyVGuAySN86iChJRoJr5c6xj+iXU +cR6BVJDjGH5t1tyiK2aYf6hEpK9/j8Z54UiVQ486zPP0PGfT2TO4lBLK+8AUmoaH +gk21ul8QeVCeCJa/o+xEoRFvzcUCgYEA8FCbbvInrUtNY+9eKaUYoNodsgBVjm5X +I0YPUL9D4d+1nvupHSV2NVmQl0w1RaJwrNTafrl5LkqjhQbmuWNta6QgfZzSA3LB +lWXo1Mm0azKdcD3qMGbvn0Q3zU+yGNEgmB/Yju3/NtgYRG6tc+FCWRbPbiCnZWT8 +v3C2Y0XggI0CgYEA2/jCZBgGkTkzue5kNVJlh5OS/aog+pCvL6hxCtarfBuTT3ed +Sje+p46cz3DVpmUpATc+Si8py7KNdYQAm/BJ2be6X+woi9Xcgo87zWgcaPCjZzId +0I2jsIE/Gl6XvpRCDrxnGWRPgt3GNP4szbPLrDPiH9oie8+Y9eYYf7G+PZkCgYEA +nRSzZOPYV4f/QDF4pVQLMykfe/iH9B/fyWjEHg3He19VQmRReIHCMMEoqBziPXAe +onpHj8oAkeer1wpZyhhZr6CKtFDLXgGm09bXSC/IRMHC81klORovyzU2HHfZfCtG +WOmIDnU2+0xpIGIP8sztJ3qnf97MTJSkOSadsWo9gwkCgYEAh5AQmJQmck88Dff2 +qIfJIX8d+BDw47BFJ89OmMFjGV8TNB+JO+AV4Vkodg4hxKpLqTFZTTUFgoYfy5u1 +1/BhAjpmCDCrzubCFhx+8VEoM2+2+MmnuQoMAm9+/mD/IidwRaARgXgvEmp7sfdt +RyWd+p2lYvFkC/jORQtDMY4uW1o= +-----END PRIVATE KEY----- +Bag Attributes + localKeyID: 02 00 00 00 + Microsoft CSP Name: Microsoft Strong Cryptographic Provider +Key Attributes + X509v3 Key Usage: 10 +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDlQhPrZwVQYFV4 +FBc0H1iTXYaznMpwZvEITKtXWACzTdguUderEVOkXW3HTi5HvC2rMayt0nqo3zcd +x1eGiqdjpZQ/wMrkz9wNEM/nNMsXntEwxk0jCVNKB/jz6vf+BOtrSI01SritAGZW +dpKoTUyztT8C2mA3X6D8g3m4Dd07ltnzxaDqAQIU5jBHh3f/Q14tlPNZWUIiqVTC +gDxgAe7MDmfs9h3CInTBX1XM5J4UsLTL23/padgeSvP5YF5qr1+0c7Tdftxr2lwA +N3rLkisf5EiLAToVyJJlgP/exo2I8DaIKe7DZzD3Y1CrurOpkcMKYu5kM1Htlbua +tDkAa2oDAgMBAAECggEAOvdueS9DyiMlCKAeQb1IQosdQOh0l0ma+FgEABC2CWhd +0LgjQTBRM6cGO+urcq7/jhdWQ1UuUG4tVn71z7itCi/F/Enhxc2C22d2GhFVpWsn +giSXJYpZ/mIjkdVfWNo6FRuRmmHwMys1p0qTOS+8qUJWhSzW75csqJZGgeUrAI61 +LBV5F0SGR7dR2xZfy7PeDs9xpD0QivDt5DpsZWPaPvw4QlhdLgw6/YU1h9vtm6ci +xLjnPRLZ7JMpcQHO8dUDl6FiEI7yQ11BDm253VQAVMddYRPQABn7SpEF8kD/aZVh +2Clvz61Rz80SKjPUthMPLWMCRp7zB0xDMzt3/1i+tQKBgQD6Ar1/oD3eFnRnpi4u +n/hdHJtMuXWNfUA4dspNjP6WGOid9sgIeUUdif1XyVJ+afITzvgpWc7nUWIqG2bQ +WxJ/4q2rjUdvjNXTy1voVungR2jD5WLQ9DKeaTR0yCliWlx4JgdPG7qGI5MMwsr+ +R/PUoUUhGeEX+o/sCSieO3iUrQKBgQDqwBEMvIdhAv/CK2sG3fsKYX8rFT55ZNX3 +Tix9DbUGY3wQColNuI8U1nDlxE9U6VOfT9RPqKelBLCgbzB23kdEJnjSlnqlTxrx +E+Hkndyf2ckdJAR3XNxoQ6SRLJNBsgoBj/z5tlfZE9/Jc+uh0mYy3e6g6XCVPBcz +MgoIc+ofbwKBgQCGQhZ1hR30N+bHCozeaPW9OvGDIE0qcEqeh9xYDRFilXnF6pK9 +SjJ9jG7KR8jPLiHb1VebDSl5O1EV/6UU2vNyTc6pw7LLCryBgkGW4aWy1WZDXNnW +EG1meGS9GghvUss5kmJ2bxOZmV0Mi0brisQ8OWagQf+JGvtS7BAt+Q3l+QKBgAb9 +8YQPmXiqPjPqVyW9Ntz4SnFeEJ5NApJ7IZgX8GxgSjGwHqbR+HEGchZl4ncE/Bii +qBA3Vcb0fM5KgYcI19aPzsl28fA6ivLjRLcqfIfGVNcpW3iyq13vpdctHLW4N9QU +FdTaOYOds+ysJziKq8CYG6NvUIshXw+HTgUybqbBAoGBAIIOqcmmtgOClAwipA17 +dAHsI9Sjk+J0+d4JU6o+5TsmhUfUKIjXf5+xqJkJcQZMEe5GhxcCuYkgFicvh4Hz +kv2H/EU35LcJTqC6KTKZOWIbGcn1cqsvwm3GQJffYDiO8fRZSwCaif2J3F2lfH4Y +R/fA67HXFSTT+OncdRpY1NOn +-----END PRIVATE KEY----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US 
+issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIlPjJXzrRih4C +k/XsoI01oqo7IUxH3dA2F7vHGXQoIpKCp8Qe6Z6cFfdD8Uj+s+B1BX6hngwzIwjN +jE/23X3SALVzJVWzX4Y/IEjbgsuao6sOyNyB18wIU9YzZkVGj68fmMlUw3LnhPbe +eWkufZaJCaLyhQOwlRMbOcn48D6Ys8fccOyXNzpq3rH1OzeQpxS2M8zaJYP4/VZ/ +sf6KRpI7bP+QwyFvNKfhcaO9/gj4kMo9lVGjvDU20FW6g8UVNJCV9N4GO6mOcyqo +OhuhVfjCNGgW7N1qi0TIVn0/MQM4l4dcT2R7Z/bV9fhMJLjGsy5A4TLAdRrhKUHT +bzi9HyDvAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 01 00 00 00 +subject=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +issuer=/C=US/ST=WASHINGTON/L=Seattle/O=Microsoft/OU=Azure/CN=AnhVo/emailAddress=redacted@microsoft.com +-----BEGIN CERTIFICATE----- +MIID7TCCAtWgAwIBAgIJALQS3yMg3R41MA0GCSqGSIb3DQEBCwUAMIGMMQswCQYD +VQQGEwJVUzETMBEGA1UECAwKV0FTSElOR1RPTjEQMA4GA1UEBwwHU2VhdHRsZTES +MBAGA1UECgwJTWljcm9zb2Z0MQ4wDAYDVQQLDAVBenVyZTEOMAwGA1UEAwwFQW5o +Vm8xIjAgBgkqhkiG9w0BCQEWE2FuaHZvQG1pY3Jvc29mdC5jb20wHhcNMTkwMjE0 +MjMxMjQwWhcNMjExMTEwMjMxMjQwWjCBjDELMAkGA1UEBhMCVVMxEzARBgNVBAgM +CldBU0hJTkdUT04xEDAOBgNVBAcMB1NlYXR0bGUxEjAQBgNVBAoMCU1pY3Jvc29m +dDEOMAwGA1UECwwFQXp1cmUxDjAMBgNVBAMMBUFuaFZvMSIwIAYJKoZIhvcNAQkB +FhNhbmh2b0BtaWNyb3NvZnQuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA5RHuX1KsHa0Ez1tqFZRitn99av/FC0/Cpjk8lLy9ET3SUOvb7xznOUNg +ioNnr4hwti1oRfFpqZ2Y+utUS9BnhRos8L1PDKHm8Oad6KnXIKBqR1bcX/pq+EHF +hefPxcHs9HQOIXLIplTQc3jaQQVCCjQTEwRvuHeoXEAVABilHRkXAhhMk6ly1NDI +B48QvSYFaliZfl9i3ABo+eyfrRmBky8yiZngnhlTTeWbSdE/OTGBwikJNfxRJSvi +quWpKL1330B45Zwj3MdDSeOzQY5T2hadoI5wAoZ+/W0+IgozhWfWr1qakaXlOde1 +Ap2O3Yzq937qHfzEkMody1rnptbVgQIDAQABo1AwTjAdBgNVHQ4EFgQUPvdgLiv3 +pAk4r0QTPZU3PFOZJvgwHwYDVR0jBBgwFoAUPvdgLiv3pAk4r0QTPZU3PFOZJvgw +DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAVUHZT+h9+uCPLTEl5IDg +kqd9WpzXA7PJd/V+7DeDDTkEd06FIKTWZLfxLVVDjQJnQqubQb//e0zGu1qKbXnX +R7xqWabGU4eyPeUFWddmt1OHhxKLU3HbJNJJdL6XKiQtpGGUQt/mqNQ/DEr6hhNF +im5I79iA8H/dXA2gyZrj5Rxea4mtsaYO0mfp1NrFtJpAh2Djy4B1lBXBIv4DWG9e +mMEwzcLCOZj2cOMA6+mdLMUjYCvIRtnn5MKUHyZX5EmX79wsqMTvVpddlVLB9Kgz +Qnvft9+SBWh9+F3ip7BsL6Q4Q9v8eHRbnP0ya7ddlgh64uwf9VOfZZdKCnwqudJP +3g== +-----END CERTIFICATE----- +Bag Attributes + localKeyID: 02 00 00 00 +subject=/CN=/subscriptions/redacted/resourcegroups/redacted/providers/Microsoft.Compute/virtualMachines/redacted +issuer=/CN=Microsoft.ManagedIdentity +-----BEGIN CERTIFICATE----- +MIIDnTCCAoWgAwIBAgIUB2lauSRccvFkoJybUfIwOUqBN7MwDQYJKoZIhvcNAQEL +BQAwJDEiMCAGA1UEAxMZTWljcm9zb2Z0Lk1hbmFnZWRJZGVudGl0eTAeFw0xOTAy +MTUxOTA5MDBaFw0xOTA4MTQxOTA5MDBaMIGUMYGRMIGOBgNVBAMTgYYvc3Vic2Ny +aXB0aW9ucy8yN2I3NTBjZC1lZDQzLTQyZmQtOTA0NC04ZDc1ZTEyNGFlNTUvcmVz +b3VyY2Vncm91cHMvYW5oZXh0cmFzc2gvcHJvdmlkZXJzL01pY3Jvc29mdC5Db21w +dXRlL3ZpcnR1YWxNYWNoaW5lcy9hbmh0ZXN0Y2VydDCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOVCE+tnBVBgVXgUFzQfWJNdhrOcynBm8QhMq1dYALNN +2C5R16sRU6RdbcdOLke8LasxrK3SeqjfNx3HV4aKp2OllD/AyuTP3A0Qz+c0yxee +0TDGTSMJU0oH+PPq9/4E62tIjTVKuK0AZlZ2kqhNTLO1PwLaYDdfoPyDebgN3TuW +2fPFoOoBAhTmMEeHd/9DXi2U81lZQiKpVMKAPGAB7swOZ+z2HcIidMFfVczknhSw +tMvbf+lp2B5K8/lgXmqvX7RztN1+3GvaXAA3esuSKx/kSIsBOhXIkmWA/97GjYjw +Nogp7sNnMPdjUKu6s6mRwwpi7mQzUe2Vu5q0OQBragMCAwEAAaNWMFQwDgYDVR0P 
+AQH/BAQDAgeAMAwGA1UdEwEB/wQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwHwYD +VR0jBBgwFoAUOJvzEsriQWdJBndPrK+Me1bCPjYwDQYJKoZIhvcNAQELBQADggEB +AFGP/g8o7Hv/to11M0UqfzJuW/AyH9RZtSRcNQFLZUndwweQ6fap8lFsA4REUdqe +7Quqp5JNNY1XzKLWXMPoheIDH1A8FFXdsAroArzlNs9tO3TlIHE8A7HxEVZEmR4b +7ZiixmkQPS2RkjEoV/GM6fheBrzuFn7X5kVZyE6cC5sfcebn8xhk3ZcXI0VmpdT0 +jFBsf5IvFCIXXLLhJI4KXc8VMoKFU1jT9na/jyaoGmfwovKj4ib8s2aiXGAp7Y38 +UCmY+bJapWom6Piy5Jzi/p/kzMVdJcSa+GqpuFxBoQYEVs2XYVl7cGu/wPM+NToC +pkSoWwF1QAnHn0eokR9E1rU= +-----END CERTIFICATE----- +Bag Attributes: +subject=/CN=CRP/OU=AzureRT/O=Microsoft Corporation/L=Redmond/ST=WA/C=US +issuer=/CN=Root Agency +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_cert b/tests/data/azure/pubkey_extract_cert new file mode 100644 index 00000000..ce9b852d --- /dev/null +++ b/tests/data/azure/pubkey_extract_cert @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB+TCCAeOgAwIBAgIBATANBgkqhkiG9w0BAQUFADAWMRQwEgYDVQQDDAtSb290 +IEFnZW5jeTAeFw0xOTAyMTUxOTA0MDRaFw0yOTAyMTUxOTE0MDRaMGwxDDAKBgNV +BAMMA0NSUDEQMA4GA1UECwwHQXp1cmVSVDEeMBwGA1UECgwVTWljcm9zb2Z0IENv +cnBvcmF0aW9uMRAwDgYDVQQHDAdSZWRtb25kMQswCQYDVQQIDAJXQTELMAkGA1UE +BhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHU9IDclbKVYVb +Yuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoi +nlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmW +vwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+ +lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4y +WzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7 +t5btUyvpAgMBAAEwDQYJKoZIhvcNAQEFBQADAQA= +-----END CERTIFICATE----- diff --git a/tests/data/azure/pubkey_extract_ssh_key b/tests/data/azure/pubkey_extract_ssh_key new file mode 100644 index 00000000..54d749ed --- /dev/null +++ b/tests/data/azure/pubkey_extract_ssh_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDHU9IDclbKVYVbYuv0+zViX+wTwlKspslmy/uf3hkWLh7pyzyrq70S7qtSW2EGixUPxZS/R8pOLHoinlKF9ILgj0gVTCJsSwnWpXRg3rhZwIVoYMHN50BHS1SqVD0lsWNMXmo76LoJcjmWvwIznvj5C/gnhU+K7+c3m7AlCyU2wjwpBAEYj7PQs6l/wTqpEiaqC5NytNBd7qp+lYYysVrpa1PFL0Nj4MMZARIfjkiJtL9qDhy9YZeJRQ6q/Fhz0kjvkZnfxixfKF4yWzOfhBrAtpF6oOnuYKk3hxjh9KjTTX4/U8zdLojalX09iyHyEjwJKGlGEpzh1aY7t5btUyvp diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 26b2b93d..02556165 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1,11 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +import unittest2 from textwrap import dedent from cloudinit.sources.helpers import azure as azure_helper from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir +from cloudinit.util import load_file from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim GOAL_STATE_TEMPLATE = """\ @@ -289,6 +291,50 @@ class TestOpenSSLManager(CiTestCase): self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) +class TestOpenSSLManagerActions(CiTestCase): + + def setUp(self): + super(TestOpenSSLManagerActions, self).setUp() + + self.allowed_subp = True + + def _data_file(self, name): + path = 'tests/data/azure' + return os.path.join(path, name) + + @unittest2.skip("todo move to cloud_test") + def test_pubkey_extract(self): + cert = load_file(self._data_file('pubkey_extract_cert')) + good_key = load_file(self._data_file('pubkey_extract_ssh_key')) + sslmgr = azure_helper.OpenSSLManager() + key = sslmgr._get_ssh_key_from_cert(cert) + self.assertEqual(good_key, key) + + good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' + fingerprint = sslmgr._get_fingerprint_from_cert(cert) + self.assertEqual(good_fingerprint, fingerprint) + + @unittest2.skip("todo move to cloud_test") + @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') + def test_parse_certificates(self, mock_decrypt_certs): + """Azure control plane puts private keys as well as certificates + into the Certificates XML object. Make sure only the public keys + from certs are extracted and that fingerprints are converted to + the form specified in the ovf-env.xml file. + """ + cert_contents = load_file(self._data_file('parse_certificates_pem')) + fingerprints = load_file(self._data_file( + 'parse_certificates_fingerprints') + ).splitlines() + mock_decrypt_certs.return_value = cert_contents + sslmgr = azure_helper.OpenSSLManager() + keys_by_fp = sslmgr.parse_certificates('') + for fp in keys_by_fp.keys(): + self.assertIn(fp, fingerprints) + for fp in fingerprints: + self.assertIn(fp, keys_by_fp) + + class TestWALinuxAgentShim(CiTestCase): def setUp(self): @@ -329,18 +375,31 @@ class TestWALinuxAgentShim(CiTestCase): def test_certificates_used_to_determine_public_keys(self): shim = wa_shim() - data = shim.register_with_azure_and_fetch_data() + """if register_with_azure_and_fetch_data() isn't passed some info about + the user's public keys, there's no point in even trying to parse + the certificates + """ + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, + {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] + certs = {'fp1': 'expected-key', + 'fp2': 'should-not-be-found', + 'fp3': 'expected-no-value-key', + } + sslmgr = self.OpenSSLManager.return_value + sslmgr.parse_certificates.return_value = certs + data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], - self.OpenSSLManager.return_value.parse_certificates.call_args_list) - self.assertEqual( - self.OpenSSLManager.return_value.parse_certificates.return_value, - data['public-keys']) + sslmgr.parse_certificates.call_args_list) + self.assertIn('expected-key', data['public-keys']) + self.assertIn('expected-no-value-key', data['public-keys']) + self.assertNotIn('should-not-be-found', data['public-keys']) def test_absent_certificates_produces_empty_public_keys(self): + mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] self.GoalState.return_value.certificates_xml = None shim = wa_shim() - data = 
shim.register_with_azure_and_fetch_data()
+ data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk)
self.assertEqual([], data['public-keys'])
def test_correct_url_used_for_report_ready(self):
-- cgit v1.2.3
From 8cfcc28db1acc7594dbbf76b846f4964f40f9e63 Mon Sep 17 00:00:00 2001
From: Eric Williams
Date: Mon, 25 Feb 2019 19:09:39 +0000
Subject: Enable encrypted_data_bag_secret support for Chef

Encrypted data bags require a secrets file to be present to decrypt, and the location of the file must be configured in the Chef client configuration file, client.rb. This update enables cloud-init's chef module to update that setting in client.rb.

LP: #1817082
--- cloudinit/config/cc_chef.py | 3 +++ doc/examples/cloud-config-chef.txt | 3 +++ templates/chef_client.rb.tmpl | 5 ++++- tests/unittests/test_handler/test_handler_chef.py | 3 +++ 4 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 46abedd1..a6240306 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -51,6 +51,7 @@ file).
chef:
client_key:
+ encrypted_data_bag_secret:
environment:
file_backup_path:
file_cache_path:
@@ -114,6 +115,7 @@ CHEF_RB_TPL_DEFAULTS = {
'file_backup_path': "/var/backups/chef",
'pid_file': "/var/run/chef/client.pid",
'show_time': True,
+ 'encrypted_data_bag_secret': None,
}
CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time'])
CHEF_RB_TPL_PATH_KEYS = frozenset([
@@ -124,6 +126,7 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([
'json_attribs',
'file_cache_path',
'pid_file',
+ 'encrypted_data_bag_secret',
])
CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys())
CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS)
diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt
index defc5a54..2320e01a 100644
--- a/doc/examples/cloud-config-chef.txt
+++ b/doc/examples/cloud-config-chef.txt
@@ -98,6 +98,9 @@ chef:
# to the install script
omnibus_version: "12.3.0"
+ # If encrypted data bags are used, the client needs to have a secrets file
+ # configured to decrypt them
+ encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret"
# Capture all subprocess output into a logfile
# Useful for troubleshooting cloud-init issues
diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl
index cbb6b15f..99978d3b 100644
--- a/templates/chef_client.rb.tmpl
+++ b/templates/chef_client.rb.tmpl
@@ -1,6 +1,6 @@
## template:jinja
{#
-This file is only utilized if the module 'cc_chef' is enabled in
+This file is only utilized if the module 'cc_chef' is enabled in
cloud-config.
Specifically, in order to enable it you need to add the following to config: chef: @@ -56,3 +56,6 @@ pid_file "{{pid_file}}" {% if show_time %} Chef::Log::Formatter.show_time = true {% endif %} +{% if encrypted_data_bag_secret %} +encrypted_data_bag_secret "{{encrypted_data_bag_secret}}" +{% endif %} diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index b16532ea..f4311268 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -145,6 +145,7 @@ class TestChef(FilesystemMockingTestCase): file_backup_path "/var/backups/chef" pid_file "/var/run/chef/client.pid" Chef::Log::Formatter.show_time = true + encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" """ tpl_file = util.load_file('templates/chef_client.rb.tmpl') self.patchUtils(self.tmp) @@ -157,6 +158,8 @@ class TestChef(FilesystemMockingTestCase): 'validation_name': 'bob', 'validation_key': "/etc/chef/vkey.pem", 'validation_cert': "this is my cert", + 'encrypted_data_bag_secret': + '/etc/chef/encrypted_data_bag_secret' }, } cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) -- cgit v1.2.3 From f0f09629a924435c223f405bea084401ecb7faa2 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 26 Feb 2019 15:13:38 +0000 Subject: cc_rsyslog: Escape possible nested set Under Python 3.7, we are seeing `FutureWarning: Possible nested set at position 23`; escaping this bracket causes that warning to disappear. LP: #1816967 --- cloudinit/config/cc_rsyslog.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 27d2366c..22b17532 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -203,7 +203,7 @@ LOG = logging.getLogger(__name__) COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') HOST_PORT_RE = re.compile( r'^(?P[@]{0,2})' - r'(([[](?P[^\]]*)[\]])|(?P[^:]*))' + r'(([\[](?P[^\]]*)[\]])|(?P[^:]*))' r'([:](?P[0-9]+))?$') -- cgit v1.2.3 From f278a8a3dbb1e45e8d83491ad24e41812cb77ddb Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 26 Feb 2019 15:23:58 +0000 Subject: util: don't determine string_types ourselves six already provides this for us, and we're already paying the cost to determine it there; no need to do it twice. --- cloudinit/sources/DataSourceOVF.py | 4 +++- cloudinit/util.py | 7 +------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 3a3fcdf6..70e7a5c0 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -15,6 +15,8 @@ import os import re import time +import six + from cloudinit import log as logging from cloudinit import sources from cloudinit import util @@ -434,7 +436,7 @@ def maybe_cdrom_device(devname): """ if not devname: return False - elif not isinstance(devname, util.string_types): + elif not isinstance(devname, six.string_types): raise ValueError("Unexpected input for devname: %s" % devname) # resolve '..' 
and multi '/' elements diff --git a/cloudinit/util.py b/cloudinit/util.py index 2be528a0..e5403f7d 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -51,11 +51,6 @@ from cloudinit import version from cloudinit.settings import (CFG_BUILTIN) -try: - string_types = (basestring,) -except NameError: - string_types = (str,) - _DNS_REDIRECT_IP = None LOG = logging.getLogger(__name__) @@ -125,7 +120,7 @@ def target_path(target, path=None): # return 'path' inside target, accepting target as None if target in (None, ""): target = "/" - elif not isinstance(target, string_types): + elif not isinstance(target, six.string_types): raise ValueError("Unexpected input for target: %s" % target) else: target = os.path.abspath(target) -- cgit v1.2.3 From 1182ad5f9362e1570c622345a3ac996c07eb2eeb Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 26 Feb 2019 15:37:36 +0000 Subject: tests: fix some slow tests and some leaking state In test_ds_identify, don't mutate otherwise-static test data. When running tests in a random order, this was causing failures due to breaking preconditions for other tests. In tests/helpers, reset logging level in tearDown. Some of the CLI tests set the level of the root logger in a way that isn't correctly reset. For test_poll_imds_re_dhcp_on_timeout and test_dhcp_discovery_run_in_sandbox_warns_invalid_pid, mock out time.sleep; this saves ~11 seconds (or ~40% of previous test time!). --- cloudinit/net/tests/test_dhcp.py | 1 + cloudinit/tests/helpers.py | 1 + tests/unittests/test_datasource/test_azure.py | 1 + tests/unittests/test_ds_identify.py | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index 79e8842f..51390249 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -117,6 +117,7 @@ class TestDHCPDiscoveryClean(CiTestCase): self.assertEqual('eth9', call[0][1]) self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2]) + @mock.patch('time.sleep', mock.MagicMock()) @mock.patch('cloudinit.net.dhcp.os.kill') @mock.patch('cloudinit.net.dhcp.util.subp') def test_dhcp_discovery_run_in_sandbox_warns_invalid_pid(self, m_subp, diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 2eb7b0cd..46a49416 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -207,6 +207,7 @@ class CiTestCase(TestCase): if self.with_logs: # Remove the handler we setup logging.getLogger().handlers = self.old_handlers + logging.getLogger().level = None util.subp = _real_subp super(CiTestCase, self).tearDown() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 417d86a9..5edf36e8 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1692,6 +1692,7 @@ class TestPreprovisioningPollIMDS(CiTestCase): self.paths = helpers.Paths({'cloud_dir': self.tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + @mock.patch('time.sleep', mock.MagicMock()) @mock.patch(MOCKPATH + 'EphemeralDHCPv4') def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, report_ready_func, fake_resp, m_media_switch, m_dhcp, diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 756b4fb4..d00c1b4b 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -441,7 +441,7 @@ class TestDsIdentify(DsIdentifyBase): nova does not identify itself on platforms other than intel. 
https://bugs.launchpad.net/cloud-init/+bugs?field.tag=dsid-nova"""
- data = VALID_CFG['OpenStack'].copy()
+ data = copy.deepcopy(VALID_CFG['OpenStack'])
del data['files'][P_PRODUCT_NAME]
data.update({'policy_dmi': POLICY_FOUND_OR_MAYBE,
'policy_no_dmi': POLICY_FOUND_OR_MAYBE})
-- cgit v1.2.3
From f2f530e5960ce8afd33e7f62a9b5d8898a6d0d79 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Wed, 27 Feb 2019 03:10:57 +0000
Subject: cc_apt_pipelining: stop disabling pipelining by default

This was introduced due to Ubuntu using S3 mirrors, and S3 having a buggy pipelining implementation. Those Ubuntu mirrors are no longer in production and, furthermore, apt has also grown the ability to handle servers with broken pipelining. As such, we can stop disabling pipelining, which should result in improved apt download speeds.

LP: #1794982
--- cloudinit/config/cc_apt_pipelining.py | 4 ++-- cloudinit/config/tests/test_apt_pipelining.py | 28 +++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) create mode 100644 cloudinit/config/tests/test_apt_pipelining.py
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index cdf28cd9..459332ab 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -49,7 +49,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
def handle(_name, cfg, _cloud, log, _args):
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
+ apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os')
apt_pipe_value_s = str(apt_pipe_value).lower().strip()
if apt_pipe_value_s == "false":
@@ -59,7 +59,7 @@ def handle(_name, cfg, _cloud, log, _args):
elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
else:
- log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
+ log.warn("Invalid option for apt_pipelining: %s", apt_pipe_value)
def write_apt_snippet(setting, log, f_name):
diff --git a/cloudinit/config/tests/test_apt_pipelining.py b/cloudinit/config/tests/test_apt_pipelining.py
new file mode 100644
index 00000000..2a6bb10b
--- /dev/null
+++ b/cloudinit/config/tests/test_apt_pipelining.py
@@ -0,0 +1,28 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_apt_pipelining handler"""
+
+import cloudinit.config.cc_apt_pipelining as cc_apt_pipelining
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+
+class TestAptPipelining(CiTestCase):
+
+ @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
+ def test_not_disabled_by_default(self, m_write_file):
+ """ensure that default behaviour is to not disable pipelining"""
+ cc_apt_pipelining.handle('foo', {}, None, mock.MagicMock(), None)
+ self.assertEqual(0, m_write_file.call_count)
+
+ @mock.patch('cloudinit.config.cc_apt_pipelining.util.write_file')
+ def test_false_disables_pipelining(self, m_write_file):
+ """ensure that pipelining can be disabled with correct config"""
+ cc_apt_pipelining.handle(
+ 'foo', {'apt_pipelining': 'false'}, None, mock.MagicMock(), None)
+ self.assertEqual(1, m_write_file.call_count)
+ args, _ = m_write_file.call_args
+ self.assertEqual(cc_apt_pipelining.DEFAULT_FILE, args[0])
+ self.assertIn('Pipeline-Depth "0"', args[1])
+
+# vi: ts=4 expandtab
-- cgit v1.2.3
From bd35300ba36bd63686715fa9661516a518781f6d Mon Sep 17 00:00:00 2001
From: Kurt Stieger
Date: Mon, 4 Mar 2019 15:54:25 +0000
Subject: net: append type:dhcp[46] only if dhcp[46] is True in v2 netconfig

When providing netplan configuration to cloud-init, the internal network state would enable DHCP if the 'dhcp' key was present in the source config. In netplan, dhcp[46] is a boolean, and its value should control whether DHCP is enabled rather than the presence of the key.

This issue led to inconsistent sysconfig/network-scripts on Fedora: 'BOOTPROTO' was always 'dhcp', even if the address config was static. After this change, a dhcp subnet is added only if the 'dhcp' setting in the source cfg dict is True.

LP: #1818032
--- cloudinit/net/network_state.py | 4 +-- tests/unittests/test_net.py | 61 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index f76e508a..539b76d8 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -706,9 +706,9 @@ class NetworkStateInterpreter(object):
"""Common ipconfig extraction from v2 to v1 subnets array."""
subnets = []
- if 'dhcp4' in cfg:
+ if cfg.get('dhcp4'):
subnets.append({'type': 'dhcp4'})
- if 'dhcp6' in cfg:
+ if cfg.get('dhcp6'):
self.use_ipv6 = True
subnets.append({'type': 'dhcp6'})
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index f001ae5a..e3b9e02b 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -406,6 +406,23 @@ network:
- maas
"""
+NETPLAN_DHCP_FALSE = """
+version: 2
+ethernets:
+ ens3:
+ match:
+ macaddress: 52:54:00:ab:cd:ef
+ dhcp4: false
+ dhcp6: false
+ addresses:
+ - 192.168.42.100/24
+ - 2001:db8::100/32
+ gateway4: 192.168.42.1
+ gateway6: 2001:db8::1
+ nameservers:
+ search: [example.com]
+ addresses: [192.168.42.53, 1.1.1.1]
+"""
# Examples (and expected outputs for various renderers).
OS_SAMPLES = [
@@ -2590,6 +2607,50 @@ USERCTL=no
config = sysconfig.ConfigObj(nm_cfg)
self.assertIn('ifcfg-rh', config['main']['plugins'])
+ def test_netplan_dhcp_false_disable_dhcp_in_state(self):
+ """netplan config with dhcp[46]: False should not add dhcp in state"""
+ net_config = yaml.load(NETPLAN_DHCP_FALSE)
+ ns = network_state.parse_net_config_data(net_config,
+ skip_broken=False)
+
+ dhcp_found = [snet for iface in ns.iter_interfaces()
+ for snet in iface['subnets'] if 'dhcp' in snet['type']]
+
+ self.assertEqual([], dhcp_found)
+
+ def test_netplan_dhcp_false_no_dhcp_in_sysconfig(self):
+ """netplan cfg with dhcp[46]: False should not have bootproto=dhcp"""
+
+ entry = {
+ 'yaml': NETPLAN_DHCP_FALSE,
+ 'expected_sysconfig': {
+ 'ifcfg-ens3': textwrap.dedent("""\
+ BOOTPROTO=none
+ DEFROUTE=yes
+ DEVICE=ens3
+ DNS1=192.168.42.53
+ DNS2=1.1.1.1
+ DOMAIN=example.com
+ GATEWAY=192.168.42.1
+ HWADDR=52:54:00:ab:cd:ef
+ IPADDR=192.168.42.100
+ IPV6ADDR=2001:db8::100/32
+ IPV6INIT=yes
+ IPV6_DEFAULTGW=2001:db8::1
+ NETMASK=255.255.255.0
+ NM_CONTROLLED=no
+ ONBOOT=yes
+ STARTMODE=auto
+ TYPE=Ethernet
+ USERCTL=no
+ """),
+ }
+ }
+
+ found = self._render_and_read(network_config=yaml.load(entry['yaml']))
+ self._compare_files_to_expected(entry['expected_sysconfig'], found)
+ self._assert_headers(found)
+
class TestOpenSuseSysConfigRendering(CiTestCase):
-- cgit v1.2.3
From eee0e09ead3d11c32e8888d13d164810ee5f19d6 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Mon, 4 Mar 2019 16:50:31 +0000
Subject: tip-pylint: Fix assignment-from-return-none errors

pylint now complains about assignment of the return value of a function call that only returns None. This does not account for subclassing, so we resolve this issue by removing the assignment in the unittest.
--- tests/unittests/test_datasource/test_configdrive.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index dcdabea5..7a6802f6 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -268,8 +268,7 @@ class TestConfigDriveDataSource(CiTestCase):
exists_mock = mocks.enter_context(
mock.patch.object(os.path, 'exists',
side_effect=exists_side_effect()))
- device = cfg_ds.device_name_to_device(name)
- self.assertEqual(dev_name, device)
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
find_mock.assert_called_once_with(mock.ANY)
self.assertEqual(exists_mock.call_count, 2)
@@ -296,8 +295,7 @@ class TestConfigDriveDataSource(CiTestCase):
exists_mock = mocks.enter_context(
mock.patch.object(os.path, 'exists',
return_value=True))
- device = cfg_ds.device_name_to_device(name)
- self.assertEqual(dev_name, device)
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
find_mock.assert_called_once_with(mock.ANY)
exists_mock.assert_called_once_with(mock.ANY)
@@ -331,8 +329,7 @@ class TestConfigDriveDataSource(CiTestCase):
yield True
with mock.patch.object(os.path, 'exists',
side_effect=exists_side_effect()):
- device = cfg_ds.device_name_to_device(name)
- self.assertEqual(dev_name, device)
+ self.assertEqual(dev_name, cfg_ds.device_name_to_device(name))
# We don't assert the call count for os.path.exists() because
# not all of the entries in name_tests results in two calls to
# that function.
Specifically, 'root2k' doesn't seem to call @@ -359,8 +356,7 @@ class TestConfigDriveDataSource(CiTestCase): } for name, dev_name in name_tests.items(): with mock.patch.object(os.path, 'exists', return_value=True): - device = cfg_ds.device_name_to_device(name) - self.assertEqual(dev_name, device) + self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) def test_dir_valid(self): """Verify a dir is read as such.""" -- cgit v1.2.3 From edf052c3196139169ecbfe98049c278f4babc8ca Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 4 Mar 2019 18:21:59 +0000 Subject: drop Python 2.6 support and our NIH version detection - Remove the last few places that use `if PY26` - Replace our Python version detection logic with six's (which we were already using in most places) --- cloudinit/tests/helpers.py | 22 +--------------------- cloudinit/util.py | 4 ---- tests/unittests/test_datasource/test_azure.py | 4 +--- 3 files changed, 2 insertions(+), 28 deletions(-) diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 46a49416..f41180fd 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -41,26 +41,6 @@ _real_subp = util.subp SkipTest = unittest2.SkipTest skipIf = unittest2.skipIf -# Used for detecting different python versions -PY2 = False -PY26 = False -PY27 = False -PY3 = False - -_PY_VER = sys.version_info -_PY_MAJOR, _PY_MINOR, _PY_MICRO = _PY_VER[0:3] -if (_PY_MAJOR, _PY_MINOR) <= (2, 6): - if (_PY_MAJOR, _PY_MINOR) == (2, 6): - PY26 = True - if (_PY_MAJOR, _PY_MINOR) >= (2, 0): - PY2 = True -else: - if (_PY_MAJOR, _PY_MINOR) == (2, 7): - PY27 = True - PY2 = True - if (_PY_MAJOR, _PY_MINOR) >= (3, 0): - PY3 = True - # Makes the old path start # with new base instead of whatever @@ -357,7 +337,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchOpen(self, new_root): trap_func = retarget_many_wrapper(new_root, 1, open) - name = 'builtins.open' if PY3 else '__builtin__.open' + name = 'builtins.open' if six.PY3 else '__builtin__.open' self.patched_funcs.enter_context(mock.patch(name, trap_func)) def patchStdoutAndStderr(self, stdout=None, stderr=None): diff --git a/cloudinit/util.py b/cloudinit/util.py index e5403f7d..a192091f 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -72,7 +72,6 @@ CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], PROC_CMDLINE = None _LSB_RELEASE = {} -PY26 = sys.version_info[0:2] == (2, 6) def get_architecture(target=None): @@ -2815,9 +2814,6 @@ def load_shell_content(content, add_empty=False, empty_val=None): variables. 
Set their value to empty_val.""" def _shlex_split(blob): - if PY26 and isinstance(blob, six.text_type): - # Older versions don't support unicode input - blob = blob.encode("utf8") return shlex.split(blob, comments=True) data = {} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 5edf36e8..6b05b8f1 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -11,7 +11,7 @@ from cloudinit.util import (b64e, decode_binary, load_file, write_file, from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call, - ExitStack, PY26, SkipTest) + ExitStack) import crypt import httpretty @@ -221,8 +221,6 @@ class TestAzureDataSource(CiTestCase): def setUp(self): super(TestAzureDataSource, self).setUp() - if PY26: - raise SkipTest("Does not work on python 2.6") self.tmp = self.tmp_dir() # patch cloud_dir, so our 'seed_dir' is guaranteed empty -- cgit v1.2.3 From 5352dd99eb2937b4eaaaf596b40ad7ca69d87f64 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 4 Mar 2019 18:41:05 +0000 Subject: helpers/openstack: Treat unknown link types as physical Some deployments of OpenStack expose link types to the guest which cloud-init doesn't recognise. These will almost always be physical, so we can operate more robustly if we assume that they are (whilst warning the user that we're seeing something unexpected). LP: #1639263 --- cloudinit/sources/helpers/openstack.py | 12 +++++------ .../unittests/test_datasource/test_configdrive.py | 23 ++++++++++++++++++++++ 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 9c29ceac..8f069115 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -67,7 +67,7 @@ OS_VERSIONS = ( OS_ROCKY, ) -PHYSICAL_TYPES = ( +KNOWN_PHYSICAL_TYPES = ( None, 'bgpovs', # not present in OpenStack upstream but used on OVH cloud. 
'bridge',
@@ -600,9 +600,7 @@ def convert_net_json(network_json=None, known_macs=None):
subnet['ipv6'] = True
subnets.append(subnet)
cfg.update({'subnets': subnets})
- if link['type'] in PHYSICAL_TYPES:
- cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
- elif link['type'] in ['bond']:
+ if link['type'] in ['bond']:
params = {}
if link_mac_addr:
params['mac_address'] = link_mac_addr
@@ -641,8 +639,10 @@ def convert_net_json(network_json=None, known_macs=None):
curinfo.update({'mac': link['vlan_mac_address'],
'name': name})
else:
- raise ValueError(
- 'Unknown network_data link type: %s' % link['type'])
+ if link['type'] not in KNOWN_PHYSICAL_TYPES:
+ LOG.warning('Unknown network_data link type (%s); treating as'
+ ' physical', link['type'])
+ cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
config.append(cfg)
link_id_info[curinfo['id']] = curinfo
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 7a6802f6..520c50fe 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -600,6 +600,9 @@ class TestNetJson(CiTestCase):
class TestConvertNetworkData(CiTestCase):
+
+ with_logs = True
+
def setUp(self):
super(TestConvertNetworkData, self).setUp()
self.tmp = self.tmp_dir()
@@ -726,6 +729,26 @@ class TestConvertNetworkData(CiTestCase):
'enp0s2': 'fa:16:3e:d4:57:ad'}
self.assertEqual(expected, config_name2mac)
+ def test_unknown_device_types_accepted(self):
+ # If we don't recognise a link, we should treat it as physical for a
+ # best-effort boot
+ my_netdata = deepcopy(NETWORK_DATA)
+ my_netdata['links'][0]['type'] = 'my-special-link-type'
+
+ ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS)
+ config_name2mac = {}
+ for n in ncfg['config']:
+ if n['type'] == 'physical':
+ config_name2mac[n['name']] = n['mac_address']
+
+ expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58',
+ 'enp0s2': 'fa:16:3e:d4:57:ad'}
+ self.assertEqual(expected, config_name2mac)
+
+ # We should, however, warn the user that we don't recognise the type
+ self.assertIn('Unknown network_data link type (my-special-link-type)',
+ self.logs.getvalue())
+
def cfg_ds_from_dir(base_d, files=None):
run = os.path.join(base_d, "run")
-- cgit v1.2.3
From 109772c2e9066f5ae53aa2806c4bb4a2ab6f4bff Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Mon, 4 Mar 2019 22:36:07 +0000
Subject: clean: correctly determine the path for excluding seed directory

Previously, init.paths.cloud_dir had a trailing slash, which meant that "/var/lib/cloud//seed" was being compared to "/var/lib/cloud/seed" and (of course) never matching. In this commit, switch to using os.path.join to avoid this case (and update the tests to catch it in future).
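For illustration, the comparison failure is easy to reproduce in isolation (the paths below are example values only, standard library throughout):

    import os.path

    cloud_dir = '/var/lib/cloud/'       # note the trailing slash
    '%s/seed' % cloud_dir               # -> '/var/lib/cloud//seed' (what was compared)
    os.path.join(cloud_dir, 'seed')     # -> '/var/lib/cloud/seed'  (what glob yields)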
LP: #1818571
--- cloudinit/cmd/clean.py | 3 ++- cloudinit/cmd/tests/test_clean.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py
index 28ee7b84..30e49de0 100644
--- a/cloudinit/cmd/clean.py
+++ b/cloudinit/cmd/clean.py
@@ -62,8 +62,9 @@ def remove_artifacts(remove_logs, remove_seed=False):
if not os.path.isdir(init.paths.cloud_dir):
return 0 # Artifacts dir already cleaned
+ seed_path = os.path.join(init.paths.cloud_dir, 'seed')
for path in glob.glob('%s/*' % init.paths.cloud_dir):
- if path == '%s/seed' % init.paths.cloud_dir and not remove_seed:
+ if path == seed_path and not remove_seed:
continue
try:
if os.path.isdir(path) and not is_link(path):
diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py
index 15c3294e..f092ab3d 100644
--- a/cloudinit/cmd/tests/test_clean.py
+++ b/cloudinit/cmd/tests/test_clean.py
@@ -22,7 +22,8 @@ class TestClean(CiTestCase):
class FakeInit(object):
cfg = {'def_log_file': self.log1,
'output': {'all': '|tee -a {0}'.format(self.log2)}}
- paths = mypaths(cloud_dir=self.artifact_dir)
+ # Ensure cloud_dir has a trailing slash, to match real behaviour
+ paths = mypaths(cloud_dir='{}/'.format(self.artifact_dir))
def __init__(self, ds_deps):
pass
-- cgit v1.2.3
From 43f5850767f2412c63a5e75d47598e5d0479fd25 Mon Sep 17 00:00:00 2001
From: Anton Olifir
Date: Wed, 6 Mar 2019 00:39:02 +0000
Subject: Example for Microsoft Azure data disk added.
--- doc/examples/cloud-config-disk-setup.txt | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/doc/examples/cloud-config-disk-setup.txt b/doc/examples/cloud-config-disk-setup.txt
index 43a62a26..89d9ff57 100644
--- a/doc/examples/cloud-config-disk-setup.txt
+++ b/doc/examples/cloud-config-disk-setup.txt
@@ -17,7 +17,7 @@ fs_setup:
device: ephemeral0
partition: auto
-# Default disk definitions for Windows Azure
+# Default disk definitions for Microsoft Azure
# ------------------------------------------
device_aliases: {'ephemeral0': '/dev/sdb'}
@@ -34,6 +34,21 @@ fs_setup:
replace_fs: ntfs
+# Data disk definitions for Microsoft Azure
+# ------------------------------------------
+
+disk_setup:
+ /dev/disk/azure/scsi1/lun0:
+ table_type: gpt
+ layout: True
+ overwrite: True
+
+fs_setup:
+ - device: /dev/disk/azure/scsi1/lun0
+ partition: 1
+ filesystem: ext4
+
+
# Default disk definitions for SmartOS
# ------------------------------------
@@ -242,7 +257,7 @@ fs_setup:
#
# "false": If an existing file system exists, skip the creation.
#
-# : This is a special directive, used for Windows Azure that
+# : This is a special directive, used for Microsoft Azure that
# instructs cloud-init to replace a file system of . NOTE:
# unless you define a label, this requires the use of the 'any' partition
# directive.
-- cgit v1.2.3
From 7c07af289b77ce9ae2e20c6f2638a54e63f016ef Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 6 Mar 2019 20:23:35 +0000
Subject: Support locking user with usermod if passwd is not available.

In some cases, the 'passwd' command might not be available, but 'usermod' might be. In Debian systems both are provided by the 'passwd' package. In Red Hat/CentOS, 'passwd' comes from the 'passwd' package, while 'usermod' comes from `shadow-utils`. This should just support either one with no real cost other than the check.
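As a rough, self-contained sketch of that fallback (using the standard library's shutil.which in place of cloud-init's util.which; illustrative only, the real change is in the diff below):

    from shutil import which

    def pick_lock_cmd(name):
        # passwd must use the short '-l'; SLES 11 lacks the long '--lock'.
        for cmd in (['passwd', '-l', name], ['usermod', '--lock', name]):
            if which(cmd[0]):
                return cmd
        raise RuntimeError("No tool available to lock account '%s'" % name)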
--- cloudinit/distros/__init__.py | 13 +++++++---- tests/unittests/test_distros/test_create_users.py | 28 +++++++++++++++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index ef618c28..20c994dc 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -577,11 +577,16 @@ class Distro(object): """ Lock the password of a user, i.e., disable password logins """ + # passwd must use short '-l' due to SLES11 lacking long form '--lock' + lock_tools = (['passwd', '-l', name], ['usermod', '--lock', name]) try: - # Need to use the short option name '-l' instead of '--lock' - # (which would be more descriptive) since SLES 11 doesn't know - # about long names. - util.subp(['passwd', '-l', name]) + cmd = next(l for l in lock_tools if util.which(l[0])) + except StopIteration: + raise RuntimeError(( + "Unable to lock user account '%s'. No tools available. " + " Tried: %s.") % (name, [c[0] for c in lock_tools])) + try: + util.subp(cmd) except Exception as e: util.logexc(LOG, 'Failed to disable password for user %s', name) raise e diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index c3f258d5..40624952 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -240,4 +240,32 @@ class TestCreateUser(CiTestCase): [mock.call(set(['auth1']), user), # not disabled mock.call(set(['key1']), 'foouser', options=disable_prefix)]) + @mock.patch("cloudinit.distros.util.which") + def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp, + m_is_snappy): + """Lock uses usermod --lock if no 'passwd' cmd available.""" + m_which.side_effect = lambda m: m in ('usermod',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['usermod', '--lock', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.util.which") + def test_lock_with_passwd_if_available(self, m_which, m_subp, + m_is_snappy): + """Lock with only passwd will use passwd.""" + m_which.side_effect = lambda m: m in ('passwd',) + self.dist.lock_passwd("bob") + self.assertEqual( + [mock.call(['passwd', '-l', 'bob'])], + m_subp.call_args_list) + + @mock.patch("cloudinit.distros.util.which") + def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp, + m_is_snappy): + """Lock with no commands available raises RuntimeError.""" + m_which.return_value = None + with self.assertRaises(RuntimeError): + self.dist.lock_passwd("bob") + # vi: ts=4 expandtab -- cgit v1.2.3 From 3554ffe8657738795ae5e1b89f22b39358d78821 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 8 Mar 2019 22:37:05 +0000 Subject: cloud-init-per: POSIX sh does not support string subst, use sed cloud-init-per is run via /bin/sh which requires POSIX shell compliance and does not implement string substitution like bash. Replace these calls with use of sed. LP: #1819222 --- tools/cloud-init-per | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/cloud-init-per b/tools/cloud-init-per index eae3e93f..fcd1ea79 100755 --- a/tools/cloud-init-per +++ b/tools/cloud-init-per @@ -38,7 +38,7 @@ fi [ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; } [ $# -ge 3 ] || { Usage 1>&2; exit 1; } freq=$1 -name=${2/-/_} +name=$(echo $2 | sed 's/-/_/g') shift 2; [ "${name#*/}" = "${name}" ] || fail "name cannot contain a /" @@ -56,7 +56,7 @@ esac # Rename legacy sem files with dashes in their names. 
Do not overwrite existing # sem files to prevent clobbering those which may have been created from calls # outside of cloud-init. -sem_legacy="${sem/_/-}" +sem_legacy=$(echo $sem | sed 's/_/-/g') [ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem" [ "$freq" != "always" -a -e "$sem" ] && exit 0 -- cgit v1.2.3 From 1e6a72b679838d87c425edd21013260e9f17b500 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 12 Mar 2019 14:52:53 +0000 Subject: DataSourceEc2: update RELEASE_BLOCKER to be more accurate Our previous understanding of the upgrade issue was incomplete; it turns out the only change we need is the one now outlined. --- cloudinit/sources/DataSourceEc2.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 4f2f6ccb..ac28f1db 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -334,8 +334,12 @@ class DataSourceEc2(sources.DataSource): if isinstance(net_md, dict): result = convert_ec2_metadata_network_config( net_md, macs_to_nics=macs_to_nics, fallback_nic=iface) - # RELEASE_BLOCKER: Xenial debian/postinst needs to add - # EventType.BOOT on upgrade path for classic. + + # RELEASE_BLOCKER: xenial should drop the below if statement, + # because the issue being addressed doesn't exist pre-netplan. + # (This datasource doesn't implement check_instance_id() so the + # datasource object is recreated every boot; this means we don't + # need to modify update_events on cloud-init upgrade.) # Non-VPC (aka Classic) Ec2 instances need to rewrite the # network config file every boot due to MAC address change. -- cgit v1.2.3 From 3acaacc92be1b7d7bad099c323d6e923664a8afa Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Tue, 12 Mar 2019 21:08:22 +0000 Subject: net/sysconfig: Handle default route setup for dhcp configured NICs When the network configuration has a default route configured and another network device that is configured with dhcp, SUSE sysconfig output should not accept the default route provided by the dhcp server. 
LP: #1812117 --- cloudinit/net/network_state.py | 41 +++++++++++++++++++++------ cloudinit/net/sysconfig.py | 31 +++++++++++++++------ tests/unittests/test_net.py | 63 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 118 insertions(+), 17 deletions(-) diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 539b76d8..4d19f562 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -148,6 +148,7 @@ class NetworkState(object): self._network_state = copy.deepcopy(network_state) self._version = version self.use_ipv6 = network_state.get('use_ipv6', False) + self._has_default_route = None @property def config(self): @@ -157,14 +158,6 @@ class NetworkState(object): def version(self): return self._version - def iter_routes(self, filter_func=None): - for route in self._network_state.get('routes', []): - if filter_func is not None: - if filter_func(route): - yield route - else: - yield route - @property def dns_nameservers(self): try: @@ -179,6 +172,12 @@ class NetworkState(object): except KeyError: return [] + @property + def has_default_route(self): + if self._has_default_route is None: + self._has_default_route = self._maybe_has_default_route() + return self._has_default_route + def iter_interfaces(self, filter_func=None): ifaces = self._network_state.get('interfaces', {}) for iface in six.itervalues(ifaces): @@ -188,6 +187,32 @@ class NetworkState(object): if filter_func(iface): yield iface + def iter_routes(self, filter_func=None): + for route in self._network_state.get('routes', []): + if filter_func is not None: + if filter_func(route): + yield route + else: + yield route + + def _maybe_has_default_route(self): + for route in self.iter_routes(): + if self._is_default_route(route): + return True + for iface in self.iter_interfaces(): + for subnet in iface.get('subnets', []): + for route in subnet.get('routes', []): + if self._is_default_route(route): + return True + return False + + def _is_default_route(self, route): + default_nets = ('::', '0.0.0.0') + return ( + route.get('prefix') == 0 + and route.get('network') in default_nets + ) + @six.add_metaclass(CommandHandlerMeta) class NetworkStateInterpreter(object): diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 19b3e60c..e59753d5 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -322,7 +322,7 @@ class Renderer(renderer.Renderer): iface_cfg[new_key] = old_value @classmethod - def _render_subnets(cls, iface_cfg, subnets): + def _render_subnets(cls, iface_cfg, subnets, has_default_route): # setting base values iface_cfg['BOOTPROTO'] = 'none' @@ -331,6 +331,7 @@ class Renderer(renderer.Renderer): mtu_key = 'MTU' subnet_type = subnet.get('type') if subnet_type == 'dhcp6': + # TODO need to set BOOTPROTO to dhcp6 on SUSE iface_cfg['IPV6INIT'] = True iface_cfg['DHCPV6C'] = True elif subnet_type in ['dhcp4', 'dhcp']: @@ -375,9 +376,9 @@ class Renderer(renderer.Renderer): ipv6_index = -1 for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): subnet_type = subnet.get('type') - if subnet_type == 'dhcp6': - continue - elif subnet_type in ['dhcp4', 'dhcp']: + if subnet_type in ['dhcp', 'dhcp4', 'dhcp6']: + if has_default_route and iface_cfg['BOOTPROTO'] != 'none': + iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = False continue elif subnet_type == 'static': if subnet_is_ipv6(subnet): @@ -443,6 +444,8 @@ class Renderer(renderer.Renderer): # TODO(harlowja): add validation that no other iface has # also provided the default route? 
iface_cfg['DEFROUTE'] = True + if iface_cfg['BOOTPROTO'] in ('dhcp', 'dhcp4', 'dhcp6'): + iface_cfg['DHCLIENT_SET_DEFAULT_ROUTE'] = True if 'gateway' in route: if is_ipv6 or is_ipv6_addr(route['gateway']): iface_cfg['IPV6_DEFAULTGW'] = route['gateway'] @@ -493,7 +496,9 @@ class Renderer(renderer.Renderer): iface_cfg = iface_contents[iface_name] route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod @@ -518,7 +523,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) # iter_interfaces on network-state is not sorted to produce @@ -547,7 +554,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @staticmethod @@ -608,7 +617,9 @@ class Renderer(renderer.Renderer): iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod @@ -620,7 +631,9 @@ class Renderer(renderer.Renderer): iface_cfg.kind = 'infiniband' iface_subnets = iface.get("subnets", []) route_cfg = iface_cfg.routes - cls._render_subnets(iface_cfg, iface_subnets) + cls._render_subnets( + iface_cfg, iface_subnets, network_state.has_default_route + ) cls._render_subnet_routes(iface_cfg, route_cfg, iface_subnets) @classmethod diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e3b9e02b..468d544a 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -860,6 +860,7 @@ NETWORK_CONFIGS = { BOOTPROTO=dhcp DEFROUTE=yes DEVICE=eth99 + DHCLIENT_SET_DEFAULT_ROUTE=yes DNS1=8.8.8.8 DNS2=8.8.4.4 DOMAIN="barley.maas sach.maas" @@ -1234,6 +1235,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true 'ifcfg-bond0.200': textwrap.dedent("""\ BOOTPROTO=dhcp DEVICE=bond0.200 + DHCLIENT_SET_DEFAULT_ROUTE=no NM_CONTROLLED=no ONBOOT=yes PHYSDEV=bond0 @@ -1333,6 +1335,7 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true 'ifcfg-eth5': textwrap.dedent("""\ BOOTPROTO=dhcp DEVICE=eth5 + DHCLIENT_SET_DEFAULT_ROUTE=no HWADDR=98:bb:9f:2c:e8:8a NM_CONTROLLED=no ONBOOT=no @@ -1988,6 +1991,23 @@ CONFIG_V1_SIMPLE_SUBNET = { 'type': 'static'}], 'type': 'physical'}]} +CONFIG_V1_MULTI_IFACE = { + 'version': 1, + 'config': [{'type': 'physical', + 'mtu': 1500, + 'subnets': [{'type': 'static', + 'netmask': '255.255.240.0', + 'routes': [{'netmask': '0.0.0.0', + 'network': '0.0.0.0', + 'gateway': '51.68.80.1'}], + 'address': '51.68.89.122', + 'ipv4': True}], + 'mac_address': 'fa:16:3e:25:b4:59', + 'name': 'eth0'}, + {'type': 'physical', + 'mtu': 9000, + 'subnets': [{'type': 'dhcp4'}], + 'mac_address': 'fa:16:3e:b1:ca:29', 'name': 'eth1'}]} DEFAULT_DEV_ATTRS = { 'eth1000': { @@ -2460,6 +2480,49 @@ USERCTL=no respath = '/etc/resolv.conf' self.assertNotIn(respath, found.keys()) 
+ def test_network_config_v1_multi_iface_samples(self): + ns = network_state.parse_net_config_data(CONFIG_V1_MULTI_IFACE) + render_dir = self.tmp_path("render") + os.makedirs(render_dir) + renderer = self._get_renderer() + renderer.render_network_state(ns, target=render_dir) + found = dir2dict(render_dir) + nspath = '/etc/sysconfig/network-scripts/' + self.assertNotIn(nspath + 'ifcfg-lo', found.keys()) + expected_i1 = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=none +DEFROUTE=yes +DEVICE=eth0 +GATEWAY=51.68.80.1 +HWADDR=fa:16:3e:25:b4:59 +IPADDR=51.68.89.122 +MTU=1500 +NETMASK=255.255.240.0 +NM_CONTROLLED=no +ONBOOT=yes +STARTMODE=auto +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected_i1, found[nspath + 'ifcfg-eth0']) + expected_i2 = """\ +# Created by cloud-init on instance boot automatically, do not edit. +# +BOOTPROTO=dhcp +DEVICE=eth1 +DHCLIENT_SET_DEFAULT_ROUTE=no +HWADDR=fa:16:3e:b1:ca:29 +MTU=9000 +NM_CONTROLLED=no +ONBOOT=yes +STARTMODE=auto +TYPE=Ethernet +USERCTL=no +""" + self.assertEqual(expected_i2, found[nspath + 'ifcfg-eth1']) + def test_config_with_explicit_loopback(self): ns = network_state.parse_net_config_data(CONFIG_V1_EXPLICIT_LOOPBACK) render_dir = self.tmp_path("render") -- cgit v1.2.3 From f2fd6eac4407e60d0e98826ab03847dda4cde138 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 14 Mar 2019 23:06:47 +0000 Subject: DataSource: move update_events from a class to an instance attribute Currently, DataSourceAzure updates self.update_events in __init__. As update_events is a class attribute on DataSource, this updates it for all instances of classes derived from DataSource including those for other clouds. This means that if DataSourceAzure is even instantiated, its behaviour is applied to whichever data source ends up being used for boot. To address this, update_events is moved from a class attribute to an instance attribute (that is therefore populated at instantiation time). This retains the defaults for all DataSource sub-class instances, but avoids them being able to mutate the state in instances of other DataSource sub-classes. update_events is only ever referenced on an instance of DataSource (or a sub-class); no code relies on it being a class attribute. (In fact, it's only used within methods on DataSource or its sub-classes, so it doesn't even _need_ to remain public, though I think it's appropriate for it to be public.) DataSourceScaleway is also updated to move update_events from a class attribute to an instance attribute, as the class attribute would now be masked by the DataSource instance attribute. 
LP: #1819913 --- cloudinit/sources/DataSourceScaleway.py | 3 ++- cloudinit/sources/__init__.py | 6 +++--- cloudinit/sources/tests/test_init.py | 15 +++++++++++++++ tests/unittests/test_datasource/test_scaleway.py | 7 +++++++ 4 files changed, 27 insertions(+), 4 deletions(-) diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index b573b382..54bfc1fe 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -171,10 +171,11 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" - update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) + self.update_events = { + 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index e6966b31..1604932d 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -164,9 +164,6 @@ class DataSource(object): # A datasource which supports writing network config on each system boot # would call update_events['network'].add(EventType.BOOT). - # Default: generate network config on new instance id (first boot). - update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} - # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute # values are reset via clear_cached_attrs during any update_metadata call. @@ -191,6 +188,9 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None + # Default: generate network config on new instance id (first boot). 
+ self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} + self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 6378e98b..cb1912be 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -575,6 +575,21 @@ class TestDataSource(CiTestCase): " events: New instance first boot", self.logs.getvalue()) + def test_data_sources_cant_mutate_update_events_for_others(self): + """update_events shouldn't be changed for other DSes (LP: #1819913)""" + + class ModifyingDS(DataSource): + + def __init__(self, sys_cfg, distro, paths): + # This mirrors what DataSourceAzure does which causes LP: + # #1819913 + DataSource.__init__(self, sys_cfg, distro, paths) + self.update_events['network'].add(EventType.BOOT) + + before_update_events = copy.deepcopy(self.datasource.update_events) + ModifyingDS(self.sys_cfg, self.distro, self.paths) + self.assertEqual(before_update_events, self.datasource.update_events) + class TestRedactSensitiveData(CiTestCase): diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index f96bf0a2..3bfd7527 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,6 +7,7 @@ import requests from cloudinit import helpers from cloudinit import settings +from cloudinit.event import EventType from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -403,3 +404,9 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') + + def test_update_events_is_correct(self): + """ensure update_events contains correct data""" + self.assertEqual( + {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, + self.datasource.update_events) -- cgit v1.2.3 From 200b0ac1fc1709f6c06bb963beb3080a5b5c6fb1 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 18 Mar 2019 17:10:13 +0000 Subject: tox: bump pylint version to latest (2.3.1) The previous version was emitting errors due to an incompatibility with one of its dependencies. (We could have pinned the dependency instead, but staying current on pylint is a worthy goal in and of itself.) 
---
 tox.ini | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tox.ini b/tox.ini
index d3717200..967321f8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -21,7 +21,7 @@ setenv =
 basepython = python3
 deps =
     # requirements
-    pylint==2.2.2
+    pylint==2.3.1
     # test-requirements because unit tests are now present in cloudinit tree
     -r{toxinidir}/test-requirements.txt
 commands = {envpython} -m pylint {posargs:cloudinit tests tools}
-- cgit v1.2.3


From 6d58bd8a65e1e7723cd6019b0ceca39564c435fd Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Mon, 18 Mar 2019 18:09:43 +0000
Subject: doc: Refresh Azure walinuxagent docs

- Remove outdated waagent.conf recommendations
- Recommend using Provisioning.UseCloudInit
- Reorganise sections so walinuxagent recommendations are easier to find
---
 doc/rtd/topics/datasources/azure.rst | 57 ++++++++++++++++++++++--------------
 1 file changed, 35 insertions(+), 22 deletions(-)

diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst
index 720a475c..b41cddd9 100644
--- a/doc/rtd/topics/datasources/azure.rst
+++ b/doc/rtd/topics/datasources/azure.rst
@@ -5,9 +5,30 @@ Azure
 This datasource finds metadata and user-data from the Azure cloud platform.
 
-Azure Platform
---------------
-The azure cloud-platform provides initial data to an instance via an attached
+walinuxagent
+------------
+walinuxagent has several functions within images. For cloud-init
+specifically, the relevant functionality it performs is to register the
+instance with the Azure cloud platform at boot so networking will be
+permitted. For more information about the other functionality of
+walinuxagent, see `Azure's documentation
+`_.
+(Note, however, that only one of walinuxagent's provisioning functionality
+and cloud-init should be used to perform instance customisation.)
+
+If you are configuring walinuxagent yourself, you will want to ensure that you
+have `Provisioning.UseCloudInit
+`_ set to
+``y``.
+
+
+Builtin Agent
+-------------
+An alternative to using walinuxagent to register to the Azure cloud platform
+is to use the ``__builtin__`` agent command. This section contains more
+background on what that code path does, and how to enable it.
+
+The Azure cloud platform provides initial data to an instance via an attached
 CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some
 information. Additional information is obtained via interaction with the
 "endpoint".
@@ -36,25 +57,17 @@ for the endpoint server (again option 245).
 You can define the path to the lease file with the 'dhclient_lease_file'
 configuration.
 
-walinuxagent
-------------
-In order to operate correctly, cloud-init needs walinuxagent to provide much
-of the interaction with azure. In addition to "provisioning" code, walinux
-does the following on the agent is a long running daemon that handles the
-following things:
-- generate a x509 certificate and send that to the endpoint
-
-waagent.conf config
-^^^^^^^^^^^^^^^^^^^
-in order to use waagent.conf with cloud-init, the following settings are recommended. Other values can be changed or set to the defaults.
-
- ::
-
- # disabling provisioning turns off all 'Provisioning.*' function
- Provisioning.Enabled=n
- # this is currently not handled by cloud-init, so let walinuxagent do it.
- ResourceDisk.Format=y
- ResourceDisk.MountPoint=/mnt
+
+IMDS
+----
+Azure provides the `instance metadata service (IMDS)
+`_
+which is a REST service on ``169.254.169.254`` providing additional
+configuration information to the instance.
Cloud-init uses the IMDS for:
+
+- network configuration for the instance which is applied per boot
+- a preprovisioning gate which blocks instance configuration until Azure
+  fabric is ready to provision
 
 
 Configuration
-- cgit v1.2.3


From 5e5894d68d21bf33649aca36973a0ef2fe72f01d Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Tue, 19 Mar 2019 14:24:37 +0000
Subject: Add ubuntu_drivers config module

The ubuntu_drivers config module enables usage of the 'ubuntu-drivers'
command. At this point it only serves as a way of installing NVIDIA
drivers for general purpose graphics processing unit (GPGPU)
functionality.

Also, a small usability improvement to get_cfg_by_path to allow it to
take a string for the key path "toplevel/second/mykey" in addition to
the original: ("toplevel", "second", "mykey")
---
 cloudinit/config/cc_ubuntu_drivers.py         | 112 +++++++++++++++++
 cloudinit/config/tests/test_ubuntu_drivers.py | 174 ++++++++++++++++++++++++++
 cloudinit/util.py                             |  15 +++
 config/cloud.cfg.tmpl                         |   3 +
 doc/rtd/topics/modules.rst                    |   1 +
 tests/unittests/test_handler/test_schema.py   |   1 +
 6 files changed, 306 insertions(+)
 create mode 100644 cloudinit/config/cc_ubuntu_drivers.py
 create mode 100644 cloudinit/config/tests/test_ubuntu_drivers.py

diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py
new file mode 100644
index 00000000..91feb603
--- /dev/null
+++ b/cloudinit/config/cc_ubuntu_drivers.py
@@ -0,0 +1,112 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Ubuntu Drivers: Interact with third party drivers in Ubuntu."""
+
+from textwrap import dedent
+
+from cloudinit.config.schema import (
+    get_schema_doc, validate_cloudconfig_schema)
+from cloudinit import log as logging
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import type_utils
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+distros = ['ubuntu']
+schema = {
+    'id': 'cc_ubuntu_drivers',
+    'name': 'Ubuntu Drivers',
+    'title': 'Interact with third party drivers in Ubuntu.',
+    'description': dedent("""\
+        This module interacts with the 'ubuntu-drivers' command to install
+        third party driver packages."""),
+    'distros': distros,
+    'examples': [dedent("""\
+        drivers:
+          nvidia:
+            license-accepted: true
+        """)],
+    'frequency': frequency,
+    'type': 'object',
+    'properties': {
+        'drivers': {
+            'type': 'object',
+            'additionalProperties': False,
+            'properties': {
+                'nvidia': {
+                    'type': 'object',
+                    'additionalProperties': False,
+                    'required': ['license-accepted'],
+                    'properties': {
+                        'license-accepted': {
+                            'type': 'boolean',
+                            'description': ("Do you accept the NVIDIA driver"
+                                            " license?"),
+                        },
+                        'version': {
+                            'type': 'string',
+                            'description': (
+                                'The version of the driver to install (e.g.'
+                                ' "390", "410").
Defaults to the latest' + ' version.'), + }, + }, + }, + }, + }, + }, +} +OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = ( + "ubuntu-drivers: error: argument : invalid choice: 'install'") + +__doc__ = get_schema_doc(schema) # Supplement python help() + + +def install_drivers(cfg, pkg_install_func): + if not isinstance(cfg, dict): + raise TypeError( + "'drivers' config expected dict, found '%s': %s" % + (type_utils.obj_name(cfg), cfg)) + + cfgpath = 'nvidia/license-accepted' + # Call translate_bool to ensure that we treat string values like "yes" as + # acceptance and _don't_ treat string values like "nah" as acceptance + # because they're True-ish + nv_acc = util.translate_bool(util.get_cfg_by_path(cfg, cfgpath)) + if not nv_acc: + LOG.debug("Not installing NVIDIA drivers. %s=%s", cfgpath, nv_acc) + return + + if not util.which('ubuntu-drivers'): + LOG.debug("'ubuntu-drivers' command not available. " + "Installing ubuntu-drivers-common") + pkg_install_func(['ubuntu-drivers-common']) + + driver_arg = 'nvidia' + version_cfg = util.get_cfg_by_path(cfg, 'nvidia/version') + if version_cfg: + driver_arg += ':{}'.format(version_cfg) + + LOG.debug("Installing NVIDIA drivers (%s=%s, version=%s)", + cfgpath, nv_acc, version_cfg if version_cfg else 'latest') + + try: + util.subp(['ubuntu-drivers', 'install', '--gpgpu', driver_arg]) + except util.ProcessExecutionError as exc: + if OLD_UBUNTU_DRIVERS_STDERR_NEEDLE in exc.stderr: + LOG.warning('the available version of ubuntu-drivers is' + ' too old to perform requested driver installation') + elif 'No drivers found for installation.' in exc.stdout: + LOG.warning('ubuntu-drivers found no drivers for installation') + raise + + +def handle(name, cfg, cloud, log, _args): + if "drivers" not in cfg: + log.debug("Skipping module named %s, no 'drivers' key in config", name) + return + + validate_cloudconfig_schema(cfg, schema) + install_drivers(cfg['drivers'], cloud.distro.install_packages) diff --git a/cloudinit/config/tests/test_ubuntu_drivers.py b/cloudinit/config/tests/test_ubuntu_drivers.py new file mode 100644 index 00000000..efba4ce7 --- /dev/null +++ b/cloudinit/config/tests/test_ubuntu_drivers.py @@ -0,0 +1,174 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import copy + +from cloudinit.tests.helpers import CiTestCase, skipUnlessJsonSchema, mock +from cloudinit.config.schema import ( + SchemaValidationError, validate_cloudconfig_schema) +from cloudinit.config import cc_ubuntu_drivers as drivers +from cloudinit.util import ProcessExecutionError + +MPATH = "cloudinit.config.cc_ubuntu_drivers." +OLD_UBUNTU_DRIVERS_ERROR_STDERR = ( + "ubuntu-drivers: error: argument : invalid choice: 'install' " + "(choose from 'list', 'autoinstall', 'devices', 'debug')\n") + + +class TestUbuntuDrivers(CiTestCase): + cfg_accepted = {'drivers': {'nvidia': {'license-accepted': True}}} + install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'] + + with_logs = True + + @skipUnlessJsonSchema() + def test_schema_requires_boolean_for_license_accepted(self): + with self.assertRaisesRegex( + SchemaValidationError, ".*license-accepted.*TRUE.*boolean"): + validate_cloudconfig_schema( + {'drivers': {'nvidia': {'license-accepted': "TRUE"}}}, + schema=drivers.schema, strict=True) + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=False) + def _assert_happy_path_taken(self, config, m_which, m_subp): + """Positive path test through handle. 
Package should be installed.""" + myCloud = mock.MagicMock() + drivers.handle('ubuntu_drivers', config, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + + def test_handle_does_package_install(self): + self._assert_happy_path_taken(self.cfg_accepted) + + def test_trueish_strings_are_considered_approval(self): + for true_value in ['yes', 'true', 'on', '1']: + new_config = copy.deepcopy(self.cfg_accepted) + new_config['drivers']['nvidia']['license-accepted'] = true_value + self._assert_happy_path_taken(new_config) + + @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError( + stdout='No drivers found for installation.\n', exit_code=1)) + @mock.patch(MPATH + "util.which", return_value=False) + def test_handle_raises_error_if_no_drivers_found(self, m_which, m_subp): + """If ubuntu-drivers doesn't install any drivers, raise an error.""" + myCloud = mock.MagicMock() + with self.assertRaises(Exception): + drivers.handle( + 'ubuntu_drivers', self.cfg_accepted, myCloud, None, None) + self.assertEqual([mock.call(['ubuntu-drivers-common'])], + myCloud.distro.install_packages.call_args_list) + self.assertEqual([mock.call(self.install_gpgpu)], + m_subp.call_args_list) + self.assertIn('ubuntu-drivers found no drivers for installation', + self.logs.getvalue()) + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=False) + def _assert_inert_with_config(self, config, m_which, m_subp): + """Helper to reduce repetition when testing negative cases""" + myCloud = mock.MagicMock() + drivers.handle('ubuntu_drivers', config, myCloud, None, None) + self.assertEqual(0, myCloud.distro.install_packages.call_count) + self.assertEqual(0, m_subp.call_count) + + def test_handle_inert_if_license_not_accepted(self): + """Ensure we don't do anything if the license is rejected.""" + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': False}}}) + + def test_handle_inert_if_garbage_in_license_field(self): + """Ensure we don't do anything if unknown text is in license field.""" + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': 'garbage'}}}) + + def test_handle_inert_if_no_license_key(self): + """Ensure we don't do anything if no license key.""" + self._assert_inert_with_config({'drivers': {'nvidia': {}}}) + + def test_handle_inert_if_no_nvidia_key(self): + """Ensure we don't do anything if other license accepted.""" + self._assert_inert_with_config( + {'drivers': {'acme': {'license-accepted': True}}}) + + def test_handle_inert_if_string_given(self): + """Ensure we don't do anything if string refusal given.""" + for false_value in ['no', 'false', 'off', '0']: + self._assert_inert_with_config( + {'drivers': {'nvidia': {'license-accepted': false_value}}}) + + @mock.patch(MPATH + "install_drivers") + def test_handle_no_drivers_does_nothing(self, m_install_drivers): + """If no 'drivers' key in the config, nothing should be done.""" + myCloud = mock.MagicMock() + myLog = mock.MagicMock() + drivers.handle('ubuntu_drivers', {'foo': 'bzr'}, myCloud, myLog, None) + self.assertIn('Skipping module named', + myLog.debug.call_args_list[0][0][0]) + self.assertEqual(0, m_install_drivers.call_count) + + @mock.patch(MPATH + "util.subp", return_value=('', '')) + @mock.patch(MPATH + "util.which", return_value=True) + def test_install_drivers_no_install_if_present(self, 
m_which, m_subp):
+        """If 'ubuntu-drivers' is present, no package install should occur."""
+        pkg_install = mock.MagicMock()
+        drivers.install_drivers(self.cfg_accepted['drivers'],
+                                pkg_install_func=pkg_install)
+        self.assertEqual(0, pkg_install.call_count)
+        self.assertEqual([mock.call('ubuntu-drivers')],
+                         m_which.call_args_list)
+        self.assertEqual([mock.call(self.install_gpgpu)],
+                         m_subp.call_args_list)
+
+    def test_install_drivers_rejects_invalid_config(self):
+        """install_drivers should raise TypeError if not given a config dict"""
+        pkg_install = mock.MagicMock()
+        with self.assertRaisesRegex(TypeError, ".*expected dict.*"):
+            drivers.install_drivers("mystring", pkg_install_func=pkg_install)
+        self.assertEqual(0, pkg_install.call_count)
+
+    @mock.patch(MPATH + "util.subp", side_effect=ProcessExecutionError(
+        stderr=OLD_UBUNTU_DRIVERS_ERROR_STDERR, exit_code=2))
+    @mock.patch(MPATH + "util.which", return_value=False)
+    def test_install_drivers_handles_old_ubuntu_drivers_gracefully(
+            self, m_which, m_subp):
+        """Older ubuntu-drivers versions should emit message and raise error"""
+        myCloud = mock.MagicMock()
+        with self.assertRaises(Exception):
+            drivers.handle(
+                'ubuntu_drivers', self.cfg_accepted, myCloud, None, None)
+        self.assertEqual([mock.call(['ubuntu-drivers-common'])],
+                         myCloud.distro.install_packages.call_args_list)
+        self.assertEqual([mock.call(self.install_gpgpu)],
+                         m_subp.call_args_list)
+        self.assertIn('WARNING: the available version of ubuntu-drivers is'
+                      ' too old to perform requested driver installation',
+                      self.logs.getvalue())
+
+
+# Sub-class TestUbuntuDrivers to run the same test cases, but with a version
+class TestUbuntuDriversWithVersion(TestUbuntuDrivers):
+    cfg_accepted = {
+        'drivers': {'nvidia': {'license-accepted': True, 'version': '123'}}}
+    install_gpgpu = ['ubuntu-drivers', 'install', '--gpgpu', 'nvidia:123']
+
+    @mock.patch(MPATH + "util.subp", return_value=('', ''))
+    @mock.patch(MPATH + "util.which", return_value=False)
+    def test_version_none_uses_latest(self, m_which, m_subp):
+        myCloud = mock.MagicMock()
+        version_none_cfg = {
+            'drivers': {'nvidia': {'license-accepted': True, 'version': None}}}
+        drivers.handle(
+            'ubuntu_drivers', version_none_cfg, myCloud, None, None)
+        self.assertEqual(
+            [mock.call(['ubuntu-drivers', 'install', '--gpgpu', 'nvidia'])],
+            m_subp.call_args_list)
+
+    def test_specifying_a_version_doesnt_override_license_acceptance(self):
+        self._assert_inert_with_config({
+            'drivers': {'nvidia': {'license-accepted': False,
+                                   'version': '123'}}
+        })
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/util.py b/cloudinit/util.py
index a192091f..385f231c 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -703,6 +703,21 @@ def get_cfg_option_list(yobj, key, default=None):
 # get a cfg entry by its path array
 # for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
 def get_cfg_by_path(yobj, keyp, default=None):
+    """Return the value of the item at path C{keyp} in C{yobj}.
+
+    example:
+      get_cfg_by_path({'a': {'b': {'num': 4}}}, 'a/b/num') == 4
+      get_cfg_by_path({'a': {'b': {'num': 4}}}, 'c/d') == None
+
+    @param yobj: A dictionary.
+    @param keyp: A path inside yobj. It can be a '/' delimited string,
+        or an iterable.
+    @param default: The default to return if the path does not exist.
+    @return: The value of the item at keyp, or C{default} if the path
+        is not found."""
+
+    if isinstance(keyp, six.string_types):
+        keyp = keyp.split("/")
     cur = yobj
     for tok in keyp:
         if tok not in cur:
diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 7513176b..25db43e0 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -112,6 +112,9 @@ cloud_final_modules:
 - landscape
 - lxd
 {% endif %}
+{% if variant in ["ubuntu", "unknown"] %}
+ - ubuntu-drivers
+{% endif %}
 {% if variant not in ["freebsd"] %}
 - puppet
 - chef
diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst
index d9720f6a..3dcdd3bc 100644
--- a/doc/rtd/topics/modules.rst
+++ b/doc/rtd/topics/modules.rst
@@ -54,6 +54,7 @@ Modules
 .. automodule:: cloudinit.config.cc_ssh_import_id
 .. automodule:: cloudinit.config.cc_timezone
 .. automodule:: cloudinit.config.cc_ubuntu_advantage
+.. automodule:: cloudinit.config.cc_ubuntu_drivers
 .. automodule:: cloudinit.config.cc_update_etc_hosts
 .. automodule:: cloudinit.config.cc_update_hostname
 .. automodule:: cloudinit.config.cc_users_groups
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
index 1bad07f6..e69a47a9 100644
--- a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -28,6 +28,7 @@ class GetSchemaTest(CiTestCase):
             'cc_runcmd',
             'cc_snap',
             'cc_ubuntu_advantage',
+            'cc_ubuntu_drivers',
             'cc_zypper_add_repo'
         ],
         [subschema['id'] for subschema in schema['allOf']])
-- cgit v1.2.3


From 22e332933e78bc1c819c4f876d48620605ae813b Mon Sep 17 00:00:00 2001
From: Raphael Glon
Date: Thu, 21 Mar 2019 13:38:53 +0000
Subject: net: Fix ipv6 static routes when using eni renderer

When rendering ipv6 static routes in eni format, the post-up/pre-down
commands were not correct for ipv6.

LP: #1818669
---
 cloudinit/net/eni.py        |  16 +++--
 tests/unittests/test_net.py | 147 +++++++++++++++++++++++++++++++++++++++-----
 2 files changed, 143 insertions(+), 20 deletions(-)

diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index 64236320..b129bb62 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -366,8 +366,6 @@ class Renderer(renderer.Renderer):
         down = indent + "pre-down route del"
         or_true = " || true"
         mapping = {
-            'network': '-net',
-            'netmask': 'netmask',
             'gateway': 'gw',
             'metric': 'metric',
         }
@@ -379,13 +377,21 @@ class Renderer(renderer.Renderer):
             default_gw = ' -A inet6 default'
 
         route_line = ''
-        for k in ['network', 'netmask', 'gateway', 'metric']:
-            if default_gw and k in ['network', 'netmask']:
+        for k in ['network', 'gateway', 'metric']:
+            if default_gw and k == 'network':
                 continue
             if k == 'gateway':
                 route_line += '%s %s %s' % (default_gw, mapping[k], route[k])
             elif k in route:
-                route_line += ' %s %s' % (mapping[k], route[k])
+                if k == 'network':
+                    if ':' in route[k]:
+                        route_line += ' -A inet6'
+                    else:
+                        route_line += ' -net'
+                    if 'prefix' in route:
+                        route_line += ' %s/%s' % (route[k], route['prefix'])
+                else:
+                    route_line += ' %s %s' % (mapping[k], route[k])
         content.append(up + route_line + or_true)
         content.append(down + route_line + or_true)
         return content
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py
index 468d544a..1b415b00 100644
--- a/tests/unittests/test_net.py
+++ b/tests/unittests/test_net.py
@@ -1114,8 +1114,8 @@ iface eth0.101 inet static
 iface eth0.101 inet static
     address 192.168.2.10/24
 
-post-up route add -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
-pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true
+post-up route add -net
10.0.0.0/8 gw 11.0.0.1 metric 3 || true +pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true """), 'expected_netplan': textwrap.dedent(""" network: @@ -1508,17 +1508,18 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true - gateway: 192.168.0.3 netmask: 255.255.255.0 network: 10.1.3.0 - - gateway: 2001:67c:1562:1 - network: 2001:67c:1 - netmask: ffff:ffff:0 - - gateway: 3001:67c:1562:1 - network: 3001:67c:1 - netmask: ffff:ffff:0 - metric: 10000 - type: static address: 192.168.1.2/24 - type: static address: 2001:1::1/92 + routes: + - gateway: 2001:67c:1562:1 + network: 2001:67c:1 + netmask: ffff:ffff:0 + - gateway: 3001:67c:1562:1 + network: 3001:67c:1 + netmask: ffff:ffff:0 + metric: 10000 """), 'expected_netplan': textwrap.dedent(""" network: @@ -1557,6 +1558,51 @@ pre-down route del -net 10.0.0.0 netmask 255.0.0.0 gw 11.0.0.1 metric 3 || true to: 3001:67c:1/32 via: 3001:67c:1562:1 """), + 'expected_eni': textwrap.dedent("""\ +auto lo +iface lo inet loopback + +auto bond0s0 +iface bond0s0 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0s1 +iface bond0s1 inet manual + bond-master bond0 + bond-mode active-backup + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + +auto bond0 +iface bond0 inet static + address 192.168.0.2/24 + gateway 192.168.0.1 + bond-mode active-backup + bond-slaves none + bond-xmit-hash-policy layer3+4 + bond_miimon 100 + hwaddress aa:bb:cc:dd:e8:ff + mtu 9000 + post-up route add -net 10.1.3.0/24 gw 192.168.0.3 || true + pre-down route del -net 10.1.3.0/24 gw 192.168.0.3 || true + +# control-alias bond0 +iface bond0 inet static + address 192.168.1.2/24 + +# control-alias bond0 +iface bond0 inet6 static + address 2001:1::1/92 + post-up route add -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true + pre-down route del -A inet6 2001:67c:1/32 gw 2001:67c:1562:1 || true + post-up route add -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ +|| true + pre-down route del -A inet6 3001:67c:1/32 gw 3001:67c:1562:1 metric 10000 \ +|| true + """), 'yaml-v2': textwrap.dedent(""" version: 2 ethernets: @@ -3633,17 +3679,17 @@ class TestEniRoundTrip(CiTestCase): 'iface eth0 inet static', ' address 172.23.31.42/26', ' gateway 172.23.31.2', - ('post-up route add -net 10.0.0.0 netmask 255.240.0.0 gw ' + ('post-up route add -net 10.0.0.0/12 gw ' '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 10.0.0.0 netmask 255.240.0.0 gw ' + ('pre-down route del -net 10.0.0.0/12 gw ' '172.23.31.1 metric 0 || true'), - ('post-up route add -net 192.168.2.0 netmask 255.255.0.0 gw ' + ('post-up route add -net 192.168.2.0/16 gw ' '172.23.31.1 metric 0 || true'), - ('pre-down route del -net 192.168.2.0 netmask 255.255.0.0 gw ' + ('pre-down route del -net 192.168.2.0/16 gw ' '172.23.31.1 metric 0 || true'), - ('post-up route add -net 10.0.200.0 netmask 255.255.0.0 gw ' + ('post-up route add -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), - ('pre-down route del -net 10.0.200.0 netmask 255.255.0.0 gw ' + ('pre-down route del -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), ] found = files['/etc/network/interfaces'].splitlines() @@ -3651,6 +3697,77 @@ class TestEniRoundTrip(CiTestCase): self.assertEqual( expected, [line for line in found if line]) + def test_ipv6_static_routes(self): + # as reported in bug 1818669 + conf = [ + {'name': 'eno3', 'type': 'physical', + 'subnets': [{ + 'address': 'fd00::12/64', + 'dns_nameservers': ['fd00:2::15'], + 'gateway': 
'fd00::1', + 'ipv6': True, + 'type': 'static', + 'routes': [{'netmask': '32', + 'network': 'fd00:12::', + 'gateway': 'fd00::2'}, + {'network': 'fd00:14::', + 'gateway': 'fd00::3'}, + {'destination': 'fe00:14::/48', + 'gateway': 'fe00::4', + 'metric': 500}, + {'gateway': '192.168.23.1', + 'metric': 999, + 'netmask': 24, + 'network': '192.168.23.0'}, + {'destination': '10.23.23.0/24', + 'gateway': '10.23.23.2', + 'metric': 300}]}]}, + ] + + files = self._render_and_read( + network_config={'config': conf, 'version': 1}) + expected = [ + 'auto lo', + 'iface lo inet loopback', + 'auto eno3', + 'iface eno3 inet6 static', + ' address fd00::12/64', + ' dns-nameservers fd00:2::15', + ' gateway fd00::1', + (' post-up route add -A inet6 fd00:12::/32 gw ' + 'fd00::2 || true'), + (' pre-down route del -A inet6 fd00:12::/32 gw ' + 'fd00::2 || true'), + (' post-up route add -A inet6 fd00:14::/64 gw ' + 'fd00::3 || true'), + (' pre-down route del -A inet6 fd00:14::/64 gw ' + 'fd00::3 || true'), + (' post-up route add -A inet6 fe00:14::/48 gw ' + 'fe00::4 metric 500 || true'), + (' pre-down route del -A inet6 fe00:14::/48 gw ' + 'fe00::4 metric 500 || true'), + (' post-up route add -net 192.168.23.0/24 gw ' + '192.168.23.1 metric 999 || true'), + (' pre-down route del -net 192.168.23.0/24 gw ' + '192.168.23.1 metric 999 || true'), + (' post-up route add -net 10.23.23.0/24 gw ' + '10.23.23.2 metric 300 || true'), + (' pre-down route del -net 10.23.23.0/24 gw ' + '10.23.23.2 metric 300 || true'), + + ] + found = files['/etc/network/interfaces'].splitlines() + + self.assertEqual( + expected, [line for line in found if line]) + + def testsimple_render_bond(self): + entry = NETWORK_CONFIGS['bond'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + self.assertEqual( + entry['expected_eni'].splitlines(), + files['/etc/network/interfaces'].splitlines()) + class TestNetRenderers(CiTestCase): @mock.patch("cloudinit.net.renderers.sysconfig.available") -- cgit v1.2.3 From dfe50e300882e3affcb02e686578807aea921b99 Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Thu, 21 Mar 2019 16:22:29 +0000 Subject: tox: Update testenv for openSUSE Leap to 15.0 Use the requirements for the openSUSE Leap 15.0 release. --- tox.ini | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tox.ini b/tox.ini index 967321f8..1f01eb76 100644 --- a/tox.ini +++ b/tox.ini @@ -96,19 +96,18 @@ deps = six==1.9.0 -r{toxinidir}/test-requirements.txt -[testenv:opensusel42] +[testenv:opensusel150] basepython = python2.7 commands = nosetests {posargs:tests/unittests cloudinit} deps = # requirements - argparse==1.3.0 - jinja2==2.8 - PyYAML==3.11 - oauthlib==0.7.2 + jinja2==2.10 + PyYAML==3.12 + oauthlib==2.0.6 configobj==5.0.6 - requests==2.11.1 - jsonpatch==1.11 - six==1.9.0 + requests==2.18.4 + jsonpatch==1.16 + six==1.11.0 -r{toxinidir}/test-requirements.txt [testenv:tip-pycodestyle] -- cgit v1.2.3 From bb0b6f1d4e587d74a6e8fe17fa1c4dc3cf5287f7 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Mon, 25 Mar 2019 15:53:12 +0000 Subject: net/sysconfig: write out SUSE-compatible IPv6 config For writing IPv6 addresses to ifcfg-* the name "IPV6ADDR" is used. For secondary IPs the value for "IPV6ADDR_SECONDARIES" is set. On SUSE based distributions the names "IPADDR6" and "IPADDR6_$SOMELABEL" need to be used. 
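Condensed from the renderer change in the diff below, the per-address
emission keeps both naming schemes in step (a sketch of the logic, not the
verbatim patch):

    def emit_ipv6(iface_cfg, ipv6_cidr, ipv6_index):
        # RHEL/CentOS names: one primary address, then space-joined secondaries
        if ipv6_index == 0:
            iface_cfg['IPV6ADDR'] = ipv6_cidr
            iface_cfg['IPADDR6'] = ipv6_cidr              # SUSE name
        elif ipv6_index == 1:
            iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr
            iface_cfg['IPADDR6_0'] = ipv6_cidr            # SUSE name
        else:
            iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr
            iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr
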
--- cloudinit/net/sysconfig.py | 3 +++ tests/unittests/test_distros/test_netconfig.py | 2 ++ tests/unittests/test_net.py | 14 ++++++++++++++ 3 files changed, 19 insertions(+) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index e59753d5..09983929 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -386,10 +386,13 @@ class Renderer(renderer.Renderer): ipv6_cidr = "%s/%s" % (subnet['address'], subnet['prefix']) if ipv6_index == 0: iface_cfg['IPV6ADDR'] = ipv6_cidr + iface_cfg['IPADDR6'] = ipv6_cidr elif ipv6_index == 1: iface_cfg['IPV6ADDR_SECONDARIES'] = ipv6_cidr + iface_cfg['IPADDR6_0'] = ipv6_cidr else: iface_cfg['IPV6ADDR_SECONDARIES'] += " " + ipv6_cidr + iface_cfg['IPADDR6_%d' % ipv6_index] = ipv6_cidr else: ipv4_index = ipv4_index + 1 suff = "" if ipv4_index == 0 else str(ipv4_index) diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index e4530408..c3c0c8c5 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -496,6 +496,7 @@ class TestNetCfgDistroRedhat(TestNetCfgDistroBase): BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 + IPADDR6=2607:f0d0:1002:0011::2/64 IPV6ADDR=2607:f0d0:1002:0011::2/64 IPV6INIT=yes IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 @@ -588,6 +589,7 @@ class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): BOOTPROTO=none DEFROUTE=yes DEVICE=eth0 + IPADDR6=2607:f0d0:1002:0011::2/64 IPV6ADDR=2607:f0d0:1002:0011::2/64 IPV6INIT=yes IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 1b415b00..fd03deb6 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -691,6 +691,9 @@ DEVICE=eth0 GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 +IPADDR6=2001:DB8::10/64 +IPADDR6_0=2001:DB9::10/64 +IPADDR6_2=2001:DB10::10/64 IPV6ADDR=2001:DB8::10/64 IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" IPV6INIT=yes @@ -729,6 +732,9 @@ DEVICE=eth0 GATEWAY=172.19.3.254 HWADDR=fa:16:3e:ed:9a:59 IPADDR=172.19.1.34 +IPADDR6=2001:DB8::10/64 +IPADDR6_0=2001:DB9::10/64 +IPADDR6_2=2001:DB10::10/64 IPV6ADDR=2001:DB8::10/64 IPV6ADDR_SECONDARIES="2001:DB9::10/64 2001:DB10::10/64" IPV6INIT=yes @@ -980,6 +986,7 @@ NETWORK_CONFIGS = { BOOTPROTO=none DEVICE=iface0 IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 IPV6ADDR=2001:1::1/64 IPV6INIT=yes NETMASK=255.255.255.0 @@ -1249,6 +1256,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true DEFROUTE=yes DEVICE=br0 IPADDR=192.168.14.2 + IPADDR6=2001:1::1/64 IPV6ADDR=2001:1::1/64 IPV6INIT=yes IPV6_DEFAULTGW=2001:4800:78ff:1b::1 @@ -1690,6 +1698,7 @@ iface bond0 inet6 static MACADDR=aa:bb:cc:dd:e8:ff IPADDR=192.168.0.2 IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 IPV6ADDR=2001:1::1/92 IPV6INIT=yes MTU=9000 @@ -1745,6 +1754,7 @@ iface bond0 inet6 static MACADDR=aa:bb:cc:dd:e8:ff IPADDR=192.168.0.2 IPADDR1=192.168.1.2 + IPADDR6=2001:1::1/92 IPV6ADDR=2001:1::1/92 IPV6INIT=yes MTU=9000 @@ -1835,6 +1845,7 @@ iface bond0 inet6 static GATEWAY=192.168.1.1 IPADDR=192.168.2.2 IPADDR1=192.168.1.2 + IPADDR6=2001:1::bbbb/96 IPV6ADDR=2001:1::bbbb/96 IPV6INIT=yes IPV6_DEFAULTGW=2001:1::1 @@ -1896,6 +1907,7 @@ iface bond0 inet6 static BRIDGE=br0 DEVICE=eth0 HWADDR=52:54:00:12:34:00 + IPADDR6=2001:1::100/96 IPV6ADDR=2001:1::100/96 IPV6INIT=yes NM_CONTROLLED=no @@ -1909,6 +1921,7 @@ iface bond0 inet6 static BRIDGE=br0 DEVICE=eth1 HWADDR=52:54:00:12:34:01 + IPADDR6=2001:1::101/96 IPV6ADDR=2001:1::101/96 IPV6INIT=yes 
NM_CONTROLLED=no @@ -2743,6 +2756,7 @@ USERCTL=no GATEWAY=192.168.42.1 HWADDR=52:54:00:ab:cd:ef IPADDR=192.168.42.100 + IPADDR6=2001:db8::100/32 IPV6ADDR=2001:db8::100/32 IPV6INIT=yes IPV6_DEFAULTGW=2001:db8::1 -- cgit v1.2.3 From 0dc3a77f41f4544e4cb5a41637af7693410d4cdf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Tue, 26 Mar 2019 18:53:50 +0000 Subject: Azure: Ensure platform random_seed is always serializable as JSON. The Azure platform surfaces random bytes into /sys via Hyper-V. Python 2.7 json.dump() raises an exception if asked to convert a str with non-character content, and python 3.0 json.dump() won't serialize a "bytes" value. As a result, c-i instance data is often not written by Azure, making reboots slower (c-i has to repeat work). The random data is base64-encoded and then decoded into a string (str or unicode depending on the version of Python in use). The base64 string has just as many bits of entropy, so we're not throwing away useful "information", but we can be certain json.dump() will correctly serialize the bits. --- cloudinit/sources/DataSourceAzure.py | 24 +++++++++++++++++++----- tests/data/azure/non_unicode_random_string | 1 + tests/unittests/test_datasource/test_azure.py | 24 ++++++++++++++++++++++-- 3 files changed, 42 insertions(+), 7 deletions(-) create mode 100644 tests/data/azure/non_unicode_random_string diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index eccbee5a..b4e3f061 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -54,6 +54,7 @@ REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' IMDS_URL = "http://169.254.169.254/metadata/" +PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by # stock ubuntu suported images. @@ -195,6 +196,8 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") + # TODO Find where platform entropy data is surfaced + PLATFORM_ENTROPY_SOURCE = None BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, @@ -1100,16 +1103,27 @@ def _check_freebsd_cdrom(cdrom_dev): return False -def _get_random_seed(): +def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" # azure / hyper-v provides random data here - # TODO. find the seed on FreeBSD platform # now update ds_cfg to reflect contents pass in config - if util.is_FreeBSD(): + if source is None: return None - return util.load_file("/sys/firmware/acpi/tables/OEM0", - quiet=True, decode=False) + seed = util.load_file(source, quiet=True, decode=False) + + # The seed generally contains non-Unicode characters. load_file puts + # them into a str (in python 2) or bytes (in python 3). In python 2, + # bad octets in a str cause util.json_dumps() to throw an exception. In + # python 3, bytes is a non-serializable type, and the handler load_file + # uses applies b64 encoding *again* to handle it. The simplest solution + # is to just b64encode the data and then decode it to a serializable + # string. Same number of bits of entropy, just with 25% more zeroes. + # There's no need to undo this base64-encoding when the random seed is + # actually used in cc_seed_random.py. 
+    seed = base64.b64encode(seed).decode()
+
+    return seed
 
 
 def list_possible_azure_ds_devs():
diff --git a/tests/data/azure/non_unicode_random_string b/tests/data/azure/non_unicode_random_string
new file mode 100644
index 00000000..b9ecefb9
--- /dev/null
+++ b/tests/data/azure/non_unicode_random_string
@@ -0,0 +1 @@
+OEM0d\x00\x00\x00\x01\x80VRTUALMICROSFT\x02\x17\x00\x06MSFT\x97\x00\x00\x00C\xb4{V\xf4X%\x061x\x90\x1c\xfen\x86\xbf~\xf5\x8c\x94&\x88\xed\x84\xf9B\xbd\xd3\xf1\xdb\xee:\xd9\x0fc\x0e\x83(\xbd\xe3'\xfc\x85,\xdf\xf4\x13\x99N\xc5\xf3Y\x1e\xe3\x0b\xa4H\x08J\xb9\xdcdb$
\ No newline at end of file
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 6b05b8f1..53c56cd0 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -7,11 +7,11 @@ from cloudinit.sources import (
     UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
 from cloudinit.util import (b64e, decode_binary, load_file, write_file,
                             find_freebsd_part, get_path_dev_freebsd,
-                            MountFailedError)
+                            MountFailedError, json_dumps, load_json)
 from cloudinit.version import version_string as vs
 from cloudinit.tests.helpers import (
     HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
-    ExitStack)
+    ExitStack, resourceLocation)
 
 import crypt
 import httpretty
@@ -1923,4 +1923,24 @@ class TestWBIsPlatformViable(CiTestCase):
             self.logs.getvalue())
 
 
+class TestRandomSeed(CiTestCase):
+    """Test proper handling of random_seed"""
+
+    def test_non_ascii_seed_is_serializable(self):
+        """Pass if a random string from the Azure infrastructure which
+        contains at least one non-Unicode character can be converted to/from
+        JSON without alteration and without throwing an exception.
+        """
+        path = resourceLocation("azure/non_unicode_random_string")
+        result = dsaz._get_random_seed(path)
+
+        obj = {'seed': result}
+        try:
+            serialized = json_dumps(obj)
+            deserialized = load_json(serialized)
+        except UnicodeDecodeError:
+            self.fail("Non-serializable random seed returned")
+
+        self.assertEqual(deserialized['seed'], result)
+
 # vi: ts=4 expandtab
-- cgit v1.2.3


From 47c53002ea7a661c674c3e409357db7e8a00297a Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Mon, 1 Apr 2019 14:24:26 +0000
Subject: cloud_tests: fix apt_pipelining test-cases

The apt_pipelining test-cases were broken, but until cloud-init changed
its default behavior to not disable pipelining, they silently passed,
as both variants only ever checked whether pipelining was disabled.

First, the tests used the 'apt' namespace, which is not for configuring
pipelining; that requires 'apt_pipelining' as the namespace. Second,
the 'os' variant needs to check that cloud-init does not write a
configuration file; it was a copy-and-paste error from the disable
test-case.

This branch fixes the config and collection to validate both scenarios.
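The namespace mix-up is easy to see in miniature. Assuming the module looks
its key up at the top level of the parsed cloud-config (a sketch, not the
module's actual code):

    # parsed cloud-config dicts
    wrong = {'apt': {'apt_pipelining': 'os'}}   # key nested under 'apt'
    right = {'apt_pipelining': 'os'}            # top-level key

    def pipelining_setting(cfg, default=False):
        # hypothetical stand-in for the module's top-level lookup
        return cfg.get('apt_pipelining', default)

    assert pipelining_setting(wrong) is False   # module never sees 'os'
    assert pipelining_setting(right) == 'os'
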
--- tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml | 3 +-- tests/cloud_tests/testcases/modules/apt_pipelining_os.py | 6 +++--- tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml | 9 ++++----- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml index bd9b5d08..22a31dc4 100644 --- a/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_disable.yaml @@ -5,8 +5,7 @@ required_features: - apt cloud_config: | #cloud-config - apt: - apt_pipelining: false + apt_pipelining: false collect_scripts: 90cloud-init-pipelining: | #!/bin/bash diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py index 740dc7c0..2b940a66 100644 --- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.py +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.py @@ -8,8 +8,8 @@ class TestAptPipeliningOS(base.CloudTestCase): """Test apt-pipelining module.""" def test_os_pipelining(self): - """Test pipelining set to os.""" - out = self.get_data_file('90cloud-init-pipelining') - self.assertIn('Acquire::http::Pipeline-Depth "0";', out) + """test 'os' settings does not write apt config file.""" + out = self.get_data_file('90cloud-init-pipelining_not_written') + self.assertEqual(0, int(out)) # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml index cbed3ba3..86d5220b 100644 --- a/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml +++ b/tests/cloud_tests/testcases/modules/apt_pipelining_os.yaml @@ -1,15 +1,14 @@ # -# Set apt pipelining value to OS +# Set apt pipelining value to OS, no conf written # required_features: - apt cloud_config: | #cloud-config - apt: - apt_pipelining: os + apt_pipelining: os collect_scripts: - 90cloud-init-pipelining: | + 90cloud-init-pipelining_not_written: | #!/bin/bash - cat /etc/apt/apt.conf.d/90cloud-init-pipelining + ls /etc/apt/apt.conf.d/90cloud-init-pipelining | wc -l # vi: ts=4 expandtab -- cgit v1.2.3 From 0d8c88393b51db6454491a379dcc2e691551217a Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 3 Apr 2019 18:23:18 +0000 Subject: DatasourceAzure: add additional logging for azure datasource Create an Azure logging decorator and use additional ReportEventStack context managers to provide additional logging details. 
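The pattern the diff below applies repeatedly is a decorator that runs the
wrapped call inside a reporting context manager. A generic sketch of that
shape (note the version added below omits functools.wraps, so a decorated
function's own __name__ becomes that of the wrapper; the reported name is
still correct because it is captured from func before wrapping):

    import functools

    def reported(stack_cls, parent):
        # Wrap each call of the decorated function in a reporting
        # context manager named after the function.
        def decorator(func):
            @functools.wraps(func)
            def impl(*args, **kwargs):
                with stack_cls(name=func.__name__,
                               description=func.__name__,
                               parent=parent):
                    return func(*args, **kwargs)
            return impl
        return decorator
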
--- cloudinit/sources/DataSourceAzure.py | 231 ++++++++++++++++++++++------------- cloudinit/sources/helpers/azure.py | 31 +++++ 2 files changed, 179 insertions(+), 83 deletions(-) mode change 100644 => 100755 cloudinit/sources/DataSourceAzure.py mode change 100644 => 100755 cloudinit/sources/helpers/azure.py diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py old mode 100644 new mode 100755 index b4e3f061..d4230b3c --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -21,10 +21,14 @@ from cloudinit import net from cloudinit.event import EventType from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources -from cloudinit.sources.helpers.azure import get_metadata_from_fabric from cloudinit.sources.helpers import netlink from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util +from cloudinit.reporting import events + +from cloudinit.sources.helpers.azure import (azure_ds_reporter, + azure_ds_telemetry_reporter, + get_metadata_from_fabric) LOG = logging.getLogger(__name__) @@ -244,6 +248,7 @@ def set_hostname(hostname, hostname_command='hostname'): util.subp([hostname_command, hostname]) +@azure_ds_telemetry_reporter @contextlib.contextmanager def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): """ @@ -290,6 +295,7 @@ class DataSourceAzure(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + @azure_ds_telemetry_reporter def bounce_network_with_azure_hostname(self): # When using cloud-init to provision, we have to set the hostname from # the metadata and "bounce" the network to force DDNS to update via @@ -315,6 +321,7 @@ class DataSourceAzure(sources.DataSource): util.logexc(LOG, "handling set_hostname failed") return False + @azure_ds_telemetry_reporter def get_metadata_from_agent(self): temp_hostname = self.metadata.get('local-hostname') agent_cmd = self.ds_cfg['agent_command'] @@ -344,15 +351,18 @@ class DataSourceAzure(sources.DataSource): LOG.debug("ssh authentication: " "using fingerprint from fabirc") - # wait very long for public SSH keys to arrive - # https://bugs.launchpad.net/cloud-init/+bug/1717611 - missing = util.log_time(logfunc=LOG.debug, - msg="waiting for SSH public key files", - func=util.wait_for_files, - args=(fp_files, 900)) - - if len(missing): - LOG.warning("Did not find files, but going on: %s", missing) + with events.ReportEventStack( + name="waiting-for-ssh-public-key", + description="wait for agents to retrieve ssh keys", + parent=azure_ds_reporter): + # wait very long for public SSH keys to arrive + # https://bugs.launchpad.net/cloud-init/+bug/1717611 + missing = util.log_time(logfunc=LOG.debug, + msg="waiting for SSH public key files", + func=util.wait_for_files, + args=(fp_files, 900)) + if len(missing): + LOG.warning("Did not find files, but going on: %s", missing) metadata = {} metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) @@ -366,6 +376,7 @@ class DataSourceAzure(sources.DataSource): subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.seed) + @azure_ds_telemetry_reporter def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. 
@@ -467,6 +478,7 @@ class DataSourceAzure(sources.DataSource): super(DataSourceAzure, self).clear_cached_attrs(attr_defaults) self._metadata_imds = sources.UNSET + @azure_ds_telemetry_reporter def _get_data(self): """Crawl and process datasource metadata caching metadata as attrs. @@ -513,6 +525,7 @@ class DataSourceAzure(sources.DataSource): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + @azure_ds_telemetry_reporter def setup(self, is_new_instance): if self._negotiated is False: LOG.debug("negotiating for %s (new_instance=%s)", @@ -580,6 +593,7 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + @azure_ds_telemetry_reporter def _report_ready(self, lease): """Tells the fabric provisioning has completed """ try: @@ -617,9 +631,14 @@ class DataSourceAzure(sources.DataSource): def _reprovision(self): """Initiate the reprovisioning workflow.""" contents = self._poll_imds() - md, ud, cfg = read_azure_ovf(contents) - return (md, ud, cfg, {'ovf-env.xml': contents}) - + with events.ReportEventStack( + name="reprovisioning-read-azure-ovf", + description="read azure ovf during reprovisioning", + parent=azure_ds_reporter): + md, ud, cfg = read_azure_ovf(contents) + return (md, ud, cfg, {'ovf-env.xml': contents}) + + @azure_ds_telemetry_reporter def _negotiate(self): """Negotiate with fabric and return data from it. @@ -652,6 +671,7 @@ class DataSourceAzure(sources.DataSource): util.del_file(REPROVISION_MARKER_FILE) return fabric_data + @azure_ds_telemetry_reporter def activate(self, cfg, is_new_instance): address_ephemeral_resize(is_new_instance=is_new_instance, preserve_ntfs=self.ds_cfg.get( @@ -690,12 +710,14 @@ def _partitions_on_device(devpath, maxnum=16): return [] +@azure_ds_telemetry_reporter def _has_ntfs_filesystem(devpath): ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True) LOG.debug('ntfs_devices found = %s', ntfs_devices) return os.path.realpath(devpath) in ntfs_devices +@azure_ds_telemetry_reporter def can_dev_be_reformatted(devpath, preserve_ntfs): """Determine if the ephemeral drive at devpath should be reformatted. @@ -744,43 +766,59 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): (cand_part, cand_path, devpath)) return False, msg + @azure_ds_telemetry_reporter def count_files(mp): ignored = set(['dataloss_warning_readme.txt']) return len([f for f in os.listdir(mp) if f.lower() not in ignored]) bmsg = ('partition %s (%s) on device %s was ntfs formatted' % (cand_part, cand_path, devpath)) - try: - file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", - update_env_for_mount={'LANG': 'C'}) - except util.MountFailedError as e: - if "unknown filesystem type 'ntfs'" in str(e): - return True, (bmsg + ' but this system cannot mount NTFS,' - ' assuming there are no important files.' - ' Formatting allowed.') - return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) - - if file_count != 0: - LOG.warning("it looks like you're using NTFS on the ephemeral disk, " - 'to ensure that filesystem does not get wiped, set ' - '%s.%s in config', '.'.join(DS_CFG_PATH), - DS_CFG_KEY_PRESERVE_NTFS) - return False, bmsg + ' but had %d files on it.' 
% file_count + + with events.ReportEventStack( + name="mount-ntfs-and-count", + description="mount-ntfs-and-count", + parent=azure_ds_reporter) as evt: + try: + file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", + update_env_for_mount={'LANG': 'C'}) + except util.MountFailedError as e: + evt.description = "cannot mount ntfs" + if "unknown filesystem type 'ntfs'" in str(e): + return True, (bmsg + ' but this system cannot mount NTFS,' + ' assuming there are no important files.' + ' Formatting allowed.') + return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e) + + if file_count != 0: + evt.description = "mounted and counted %d files" % file_count + LOG.warning("it looks like you're using NTFS on the ephemeral" + " disk, to ensure that filesystem does not get wiped," + " set %s.%s in config", '.'.join(DS_CFG_PATH), + DS_CFG_KEY_PRESERVE_NTFS) + return False, bmsg + ' but had %d files on it.' % file_count return True, bmsg + ' and had no important files. Safe for reformatting.' +@azure_ds_telemetry_reporter def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, is_new_instance=False, preserve_ntfs=False): # wait for ephemeral disk to come up naplen = .2 - missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen, - log_pre="Azure ephemeral disk: ") - - if missing: - LOG.warning("ephemeral device '%s' did not appear after %d seconds.", - devpath, maxwait) - return + with events.ReportEventStack( + name="wait-for-ephemeral-disk", + description="wait for ephemeral disk", + parent=azure_ds_reporter): + missing = util.wait_for_files([devpath], + maxwait=maxwait, + naplen=naplen, + log_pre="Azure ephemeral disk: ") + + if missing: + LOG.warning("ephemeral device '%s' did" + " not appear after %d seconds.", + devpath, maxwait) + return result = False msg = None @@ -808,6 +846,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, return +@azure_ds_telemetry_reporter def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command @@ -843,6 +882,7 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): return True +@azure_ds_telemetry_reporter def crtfile_to_pubkey(fname, data=None): pipeline = ('openssl x509 -noout -pubkey < "$0" |' 'ssh-keygen -i -m PKCS8 -f /dev/stdin') @@ -851,6 +891,7 @@ def crtfile_to_pubkey(fname, data=None): return out.rstrip() +@azure_ds_telemetry_reporter def pubkeys_from_crt_files(flist): pubkeys = [] errors = [] @@ -866,6 +907,7 @@ def pubkeys_from_crt_files(flist): return pubkeys +@azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): def _redact_password(cnt, fname): @@ -893,6 +935,7 @@ def write_files(datadir, files, dirmode=None): util.write_file(filename=fname, content=content, mode=0o600) +@azure_ds_telemetry_reporter def invoke_agent(cmd): # this is a function itself to simplify patching it for test if cmd: @@ -912,6 +955,7 @@ def find_child(node, filter_func): return ret +@azure_ds_telemetry_reporter def load_azure_ovf_pubkeys(sshnode): # This parses a 'SSH' node formatted like below, and returns # an array of dicts. 
@@ -964,6 +1008,7 @@ def load_azure_ovf_pubkeys(sshnode): return found +@azure_ds_telemetry_reporter def read_azure_ovf(contents): try: dom = minidom.parseString(contents) @@ -1064,6 +1109,7 @@ def read_azure_ovf(contents): return (md, ud, cfg) +@azure_ds_telemetry_reporter def _extract_preprovisioned_vm_setting(dom): """Read the preprovision flag from the ovf. It should not exist unless true.""" @@ -1092,6 +1138,7 @@ def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) +@azure_ds_telemetry_reporter def _check_freebsd_cdrom(cdrom_dev): """Return boolean indicating path to cdrom device has content.""" try: @@ -1103,6 +1150,7 @@ def _check_freebsd_cdrom(cdrom_dev): return False +@azure_ds_telemetry_reporter def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): """Return content random seed file if available, otherwise, return None.""" @@ -1126,6 +1174,7 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): return seed +@azure_ds_telemetry_reporter def list_possible_azure_ds_devs(): devlist = [] if util.is_FreeBSD(): @@ -1140,6 +1189,7 @@ def list_possible_azure_ds_devs(): return devlist +@azure_ds_telemetry_reporter def load_azure_ds_dir(source_dir): ovf_file = os.path.join(source_dir, "ovf-env.xml") @@ -1162,47 +1212,54 @@ def parse_network_config(imds_metadata): @param: imds_metadata: Dict of content read from IMDS network service. @return: Dictionary containing network version 2 standard configuration. """ - if imds_metadata != sources.UNSET and imds_metadata: - netconfig = {'version': 2, 'ethernets': {}} - LOG.debug('Azure: generating network configuration from IMDS') - network_metadata = imds_metadata['network'] - for idx, intf in enumerate(network_metadata['interface']): - nicname = 'eth{idx}'.format(idx=idx) - dev_config = {} - for addr4 in intf['ipv4']['ipAddress']: - privateIpv4 = addr4['privateIpAddress'] - if privateIpv4: - if dev_config.get('dhcp4', False): - # Append static address config for nic > 1 - netPrefix = intf['ipv4']['subnet'][0].get( - 'prefix', '24') - if not dev_config.get('addresses'): - dev_config['addresses'] = [] - dev_config['addresses'].append( - '{ip}/{prefix}'.format( - ip=privateIpv4, prefix=netPrefix)) - else: - dev_config['dhcp4'] = True - for addr6 in intf['ipv6']['ipAddress']: - privateIpv6 = addr6['privateIpAddress'] - if privateIpv6: - dev_config['dhcp6'] = True - break - if dev_config: - mac = ':'.join(re.findall(r'..', intf['macAddress'])) - dev_config.update( - {'match': {'macaddress': mac.lower()}, - 'set-name': nicname}) - netconfig['ethernets'][nicname] = dev_config - else: - blacklist = ['mlx4_core'] - LOG.debug('Azure: generating fallback configuration') - # generate a network config, blacklist picking mlx4_core devs - netconfig = net.generate_fallback_config( - blacklist_drivers=blacklist, config_driver=True) - return netconfig + with events.ReportEventStack( + name="parse_network_config", + description="", + parent=azure_ds_reporter) as evt: + if imds_metadata != sources.UNSET and imds_metadata: + netconfig = {'version': 2, 'ethernets': {}} + LOG.debug('Azure: generating network configuration from IMDS') + network_metadata = imds_metadata['network'] + for idx, intf in enumerate(network_metadata['interface']): + nicname = 'eth{idx}'.format(idx=idx) + dev_config = {} + for addr4 in intf['ipv4']['ipAddress']: + privateIpv4 = addr4['privateIpAddress'] + if privateIpv4: + if dev_config.get('dhcp4', False): + # Append static address config for nic > 1 + netPrefix = 
intf['ipv4']['subnet'][0].get( + 'prefix', '24') + if not dev_config.get('addresses'): + dev_config['addresses'] = [] + dev_config['addresses'].append( + '{ip}/{prefix}'.format( + ip=privateIpv4, prefix=netPrefix)) + else: + dev_config['dhcp4'] = True + for addr6 in intf['ipv6']['ipAddress']: + privateIpv6 = addr6['privateIpAddress'] + if privateIpv6: + dev_config['dhcp6'] = True + break + if dev_config: + mac = ':'.join(re.findall(r'..', intf['macAddress'])) + dev_config.update( + {'match': {'macaddress': mac.lower()}, + 'set-name': nicname}) + netconfig['ethernets'][nicname] = dev_config + evt.description = "network config from imds" + else: + blacklist = ['mlx4_core'] + LOG.debug('Azure: generating fallback configuration') + # generate a network config, blacklist picking mlx4_core devs + netconfig = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True) + evt.description = "network config from fallback" + return netconfig +@azure_ds_telemetry_reporter def get_metadata_from_imds(fallback_nic, retries): """Query Azure's network metadata service, returning a dictionary. @@ -1227,6 +1284,7 @@ def get_metadata_from_imds(fallback_nic, retries): return util.log_time(**kwargs) +@azure_ds_telemetry_reporter def _get_metadata_from_imds(retries): url = IMDS_URL + "instance?api-version=2017-12-01" @@ -1246,6 +1304,7 @@ def _get_metadata_from_imds(retries): return {} +@azure_ds_telemetry_reporter def maybe_remove_ubuntu_network_config_scripts(paths=None): """Remove Azure-specific ubuntu network config for non-primary nics. @@ -1283,14 +1342,20 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): def _is_platform_viable(seed_dir): - """Check platform environment to report if this datasource may run.""" - asset_tag = util.read_dmi_data('chassis-asset-tag') - if asset_tag == AZURE_CHASSIS_ASSET_TAG: - return True - LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) - if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): - return True - return False + """Check platform environment to report if this datasource may run.""" + with events.ReportEventStack( + name="check-platform-viability", + description="found azure asset tag", + parent=azure_ds_reporter) as evt: + + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag == AZURE_CHASSIS_ASSET_TAG: + return True + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + evt.description = ("Non-Azure DMI asset tag '%s' discovered." % asset_tag) + if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')): + return True + return False class BrokenAzureDataSource(Exception): diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py old mode 100644 new mode 100755 index 2829dd20..d3af05ee --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -16,10 +16,27 @@ from xml.etree import ElementTree from cloudinit import url_helper from cloudinit import util +from cloudinit.reporting import events LOG = logging.getLogger(__name__) +azure_ds_reporter = events.ReportEventStack( + name="azure-ds", + description="initialize reporter for azure ds", + reporting_enabled=True) + + +def azure_ds_telemetry_reporter(func): + def impl(*args, **kwargs): + with events.ReportEventStack( + name=func.__name__, + description=func.__name__, + parent=azure_ds_reporter): + return func(*args, **kwargs) + return impl + + @contextmanager def cd(newdir): prevdir = os.getcwd() @@ -119,6 +136,7 @@ class OpenSSLManager(object): def clean_up(self):
util.del_dir(self.tmpdir) + @azure_ds_telemetry_reporter def generate_certificate(self): LOG.debug('Generating certificate for communication with fabric...') if self.certificate is not None: @@ -139,17 +157,20 @@ class OpenSSLManager(object): LOG.debug('New certificate generated.') @staticmethod + @azure_ds_telemetry_reporter def _run_x509_action(action, cert): cmd = ['openssl', 'x509', '-noout', action] result, _ = util.subp(cmd, data=cert) return result + @azure_ds_telemetry_reporter def _get_ssh_key_from_cert(self, certificate): pub_key = self._run_x509_action('-pubkey', certificate) keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin'] ssh_key, _ = util.subp(keygen_cmd, data=pub_key) return ssh_key + @azure_ds_telemetry_reporter def _get_fingerprint_from_cert(self, certificate): """openssl x509 formats fingerprints as so: 'SHA1 Fingerprint=07:3E:19:D1:4D:1C:79:92:24:C6:A0:FD:8D:DA:\ @@ -163,6 +184,7 @@ class OpenSSLManager(object): octets = raw_fp[eq+1:-1].split(':') return ''.join(octets) + @azure_ds_telemetry_reporter def _decrypt_certs_from_xml(self, certificates_xml): """Decrypt the certificates XML document using the our private key; return the list of certs and private keys contained in the doc. @@ -185,6 +207,7 @@ class OpenSSLManager(object): shell=True, data=b'\n'.join(lines)) return out + @azure_ds_telemetry_reporter def parse_certificates(self, certificates_xml): """Given the Certificates XML document, return a dictionary of fingerprints and associated SSH keys derived from the certs.""" @@ -265,11 +288,13 @@ class WALinuxAgentShim(object): return socket.inet_ntoa(packed_bytes) @staticmethod + @azure_ds_telemetry_reporter def _networkd_get_value_from_leases(leases_d=None): return dhcp.networkd_get_option_from_leases( 'OPTION_245', leases_d=leases_d) @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] content = util.load_file(fallback_lease_file) @@ -287,6 +312,7 @@ class WALinuxAgentShim(object): return leases[-1] @staticmethod + @azure_ds_telemetry_reporter def _load_dhclient_json(): dhcp_options = {} hooks_dir = WALinuxAgentShim._get_hooks_dir() @@ -305,6 +331,7 @@ class WALinuxAgentShim(object): return dhcp_options @staticmethod + @azure_ds_telemetry_reporter def _get_value_from_dhcpoptions(dhcp_options): if dhcp_options is None: return None @@ -318,6 +345,7 @@ class WALinuxAgentShim(object): return _value @staticmethod + @azure_ds_telemetry_reporter def find_endpoint(fallback_lease_file=None, dhcp245=None): value = None if dhcp245 is not None: @@ -352,6 +380,7 @@ class WALinuxAgentShim(object): LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address + @azure_ds_telemetry_reporter def register_with_azure_and_fetch_data(self, pubkey_info=None): if self.openssl_manager is None: self.openssl_manager = OpenSSLManager() @@ -404,6 +433,7 @@ class WALinuxAgentShim(object): return keys + @azure_ds_telemetry_reporter def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') document = self.REPORT_READY_XML_TEMPLATE.format( @@ -419,6 +449,7 @@ class WALinuxAgentShim(object): LOG.info('Reported ready to Azure fabric.') +@azure_ds_telemetry_reporter def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, pubkey_info=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, -- cgit v1.2.3 From 528366820bb48c13957d0c58afc2a46a3ba84bef Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Wed, 3 Apr 2019 22:23:29 
+0000 Subject: Azure: Treat _unset network configuration as if it were absent When the Azure datasource persists all of its metadata to the instance directory, it deliberately sets the self.network_config value to be the sources.UNSET value. The goal is to ensure that each time the system boots, fresh network configuration data is fetched from the cloud platform so that any control plane changes will take effect. When a VM is first created, there's no pickled instance to restore, so self._network_config is None, resulting in self.network_config() properly building a new config. Azure suffered from LP: #1801364 which prevented ds from being stored in obj.pkl in the instance directory, so subsequent reboots always regenerated their network configuration. Commit 0dc3a77f41f4544e4cb5a41637af7693410d4cdf introduced a new bug in which self.network_config() assumed the self._network_config value was either None or trustable; when the config was unpickled, that value was _unset, thus breaking the assumption. LP: #1823084 --- cloudinit/sources/DataSourceAzure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d4230b3c..76b16616 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -688,7 +688,7 @@ class DataSourceAzure(sources.DataSource): 2. Generate a fallback network config that does not include any of the blacklisted devices. """ - if not self._network_config: + if not self._network_config or self._network_config == sources.UNSET: if self.ds_cfg.get('apply_network_config'): nc_src = self._metadata_imds else: -- cgit v1.2.3 From f247dd20ea73f8e153936bee50c57dae9440ecf7 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 4 Apr 2019 20:40:44 +0000 Subject: ubuntu_advantage: rewrite cloud-config module MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ubuntu-advantage-tools version 19 has a different command line interface. Update cloud-init's config module to accept new ubuntu_advantage configuration settings. * Underscores better than hyphens: deprecate 'ubuntu-advantage' cloud-config key in favor of 'ubuntu_advantage' * Attach machines with either SSO credentials or a UA user_token * Services are enabled by name through an 'enable' list * Raise warnings if deprecated ubuntu-advantage config keys are present, or errors if the config cannot be adapted. Ubuntu Advantage support can now be configured via #cloud-config with the following yaml: ubuntu_advantage:   token: 'thisismyubuntuadvantagetoken'   enable: [esm, fips, livepatch] Co-Authored-By: Daniel Watkins --- cloudinit/config/cc_ubuntu_advantage.py | 225 +++++++-------- cloudinit/config/tests/test_ubuntu_advantage.py | 347 +++++++++++++----------- 2 files changed, 307 insertions(+), 265 deletions(-) diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index 5e082bd6..f4881233 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -1,150 +1,143 @@ -# Copyright (C) 2018 Canonical Ltd. -# # This file is part of cloud-init. See LICENSE file for license information.
-"""Ubuntu advantage: manage ubuntu-advantage offerings from Canonical.""" +"""ubuntu_advantage: Configure Ubuntu Advantage support services""" -import sys from textwrap import dedent -from cloudinit import log as logging +import six + from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) +from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE -from cloudinit.subp import prepend_base_command from cloudinit import util -distros = ['ubuntu'] -frequency = PER_INSTANCE +UA_URL = 'https://ubuntu.com/advantage' -LOG = logging.getLogger(__name__) +distros = ['ubuntu'] schema = { 'id': 'cc_ubuntu_advantage', 'name': 'Ubuntu Advantage', - 'title': 'Install, configure and manage ubuntu-advantage offerings', + 'title': 'Configure Ubuntu Advantage support services', 'description': dedent("""\ - This module provides configuration options to setup ubuntu-advantage - subscriptions. - - .. note:: - Both ``commands`` value can be either a dictionary or a list. If - the configuration provided is a dictionary, the keys are only used - to order the execution of the commands and the dictionary is - merged with any vendor-data ubuntu-advantage configuration - provided. If a ``commands`` is provided as a list, any vendor-data - ubuntu-advantage ``commands`` are ignored. - - Ubuntu-advantage ``commands`` is a dictionary or list of - ubuntu-advantage commands to run on the deployed machine. - These commands can be used to enable or disable subscriptions to - various ubuntu-advantage products. See 'man ubuntu-advantage' for more - information on supported subcommands. - - .. note:: - Each command item can be a string or list. If the item is a list, - 'ubuntu-advantage' can be omitted and it will automatically be - inserted as part of the command. + Attach machine to an existing Ubuntu Advantage support contract and + enable or disable support services such as Livepatch, ESM, + FIPS and FIPS Updates. When attaching a machine to Ubuntu Advantage, + one can also specify services to enable. When the 'enable' + list is present, any named service will be enabled and all absent + services will remain disabled. + + Note that when enabling FIPS or FIPS updates you will need to schedule + a reboot to ensure the machine is running the FIPS-compliant kernel. + See :ref:`Power State Change` for information on how to configure + cloud-init to perform this reboot. """), 'distros': distros, 'examples': [dedent("""\ - # Enable Extended Security Maintenance using your service auth token + # Attach the machine to a Ubuntu Advantage support contract with a + # UA contract token obtained from %s. + ubuntu_advantage: + token: + """ % UA_URL), dedent("""\ + # Attach the machine to an Ubuntu Advantage support contract enabling + # only fips and esm services. Services will only be enabled if + # the environment supports said service. Otherwise warnings will + # be logged for incompatible services specified. ubuntu-advantage: - commands: - 00: ubuntu-advantage enable-esm + token: + enable: + - fips + - esm """), dedent("""\ - # Enable livepatch by providing your livepatch token + # Attach the machine to an Ubuntu Advantage support contract and enable + # the FIPS service. Perform a reboot once cloud-init has + # completed. 
+ power_state: + mode: reboot ubuntu-advantage: - commands: - 00: ubuntu-advantage enable-livepatch - - """), dedent("""\ - # Convenience: the ubuntu-advantage command can be omitted when - # specifying commands as a list and 'ubuntu-advantage' will - # automatically be prepended. - # The following commands are equivalent - ubuntu-advantage: - commands: - 00: ['enable-livepatch', 'my-token'] - 01: ['ubuntu-advantage', 'enable-livepatch', 'my-token'] - 02: ubuntu-advantage enable-livepatch my-token - 03: 'ubuntu-advantage enable-livepatch my-token' - """)], + token: + enable: + - fips + """)], 'frequency': PER_INSTANCE, 'type': 'object', 'properties': { - 'ubuntu-advantage': { + 'ubuntu_advantage': { 'type': 'object', 'properties': { - 'commands': { - 'type': ['object', 'array'], # Array of strings or dict - 'items': { - 'oneOf': [ - {'type': 'array', 'items': {'type': 'string'}}, - {'type': 'string'}] - }, - 'additionalItems': False, # Reject non-string & non-list - 'minItems': 1, - 'minProperties': 1, + 'enable': { + 'type': 'array', + 'items': {'type': 'string'}, + }, + 'token': { + 'type': 'string', + 'description': ( + 'A contract token obtained from %s.' % UA_URL) } }, - 'additionalProperties': False, # Reject keys not in schema - 'required': ['commands'] + 'required': ['token'], + 'additionalProperties': False } } } -# TODO schema for 'assertions' and 'commands' are too permissive at the moment. -# Once python-jsonschema supports schema draft 6 add support for arbitrary -# object keys with 'patternProperties' constraint to validate string values. - __doc__ = get_schema_doc(schema) # Supplement python help() -UA_CMD = "ubuntu-advantage" - - -def run_commands(commands): - """Run the commands provided in ubuntu-advantage:commands config. +LOG = logging.getLogger(__name__) - Commands are run individually. Any errors are collected and reported - after attempting all commands. - @param commands: A list or dict containing commands to run. Keys of a - dict will be used to order the commands provided as dict values. 
- """ - if not commands: - return - LOG.debug('Running user-provided ubuntu-advantage commands') - if isinstance(commands, dict): - # Sort commands based on dictionary key - commands = [v for _, v in sorted(commands.items())] - elif not isinstance(commands, list): - raise TypeError( - 'commands parameter was not a list or dict: {commands}'.format( - commands=commands)) - - fixed_ua_commands = prepend_base_command('ubuntu-advantage', commands) - - cmd_failures = [] - for command in fixed_ua_commands: - shell = isinstance(command, str) - try: - util.subp(command, shell=shell, status_cb=sys.stderr.write) - except util.ProcessExecutionError as e: - cmd_failures.append(str(e)) - if cmd_failures: - msg = ( - 'Failures running ubuntu-advantage commands:\n' - '{cmd_failures}'.format( - cmd_failures=cmd_failures)) +def configure_ua(token=None, enable=None): + """Call ua commandline client to attach or enable services.""" + error = None + if not token: + error = ('ubuntu_advantage: token must be provided') + LOG.error(error) + raise RuntimeError(error) + + if enable is None: + enable = [] + elif isinstance(enable, six.string_types): + LOG.warning('ubuntu_advantage: enable should be a list, not' + ' a string; treating as a single enable') + enable = [enable] + elif not isinstance(enable, list): + LOG.warning('ubuntu_advantage: enable should be a list, not' + ' a %s; skipping enabling services', + type(enable).__name__) + enable = [] + + attach_cmd = ['ua', 'attach', token] + LOG.debug('Attaching to Ubuntu Advantage. %s', ' '.join(attach_cmd)) + try: + util.subp(attach_cmd) + except util.ProcessExecutionError as e: + msg = 'Failure attaching Ubuntu Advantage:\n{error}'.format( + error=str(e)) util.logexc(LOG, msg) raise RuntimeError(msg) + enable_errors = [] + for service in enable: + try: + cmd = ['ua', 'enable', service] + util.subp(cmd, capture=True) + except util.ProcessExecutionError as e: + enable_errors.append((service, e)) + if enable_errors: + for service, error in enable_errors: + msg = 'Failure enabling "{service}":\n{error}'.format( + service=service, error=str(error)) + util.logexc(LOG, msg) + raise RuntimeError( + 'Failure enabling Ubuntu Advantage service(s): {}'.format( + ', '.join('"{}"'.format(service) + for service, _ in enable_errors))) def maybe_install_ua_tools(cloud): """Install ubuntu-advantage-tools if not present.""" - if util.which('ubuntu-advantage'): + if util.which('ua'): return try: cloud.distro.update_package_sources() @@ -159,14 +152,28 @@ def maybe_install_ua_tools(cloud): def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('ubuntu-advantage') - if cfgin is None: - LOG.debug(("Skipping module named %s," - " no 'ubuntu-advantage' key in configuration"), name) + ua_section = None + if 'ubuntu-advantage' in cfg: + LOG.warning('Deprecated configuration key "ubuntu-advantage" provided.' + ' Expected underscore delimited "ubuntu_advantage"; will' + ' attempt to continue.') + ua_section = cfg['ubuntu-advantage'] + if 'ubuntu_advantage' in cfg: + ua_section = cfg['ubuntu_advantage'] + if ua_section is None: + LOG.debug("Skipping module named %s," + " no 'ubuntu_advantage' configuration found", name) return - validate_cloudconfig_schema(cfg, schema) + if 'commands' in ua_section: + msg = ( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' 
+ ' Expected "token"') + LOG.error(msg) + raise RuntimeError(msg) + maybe_install_ua_tools(cloud) - run_commands(cfgin.get('commands', [])) + configure_ua(token=ua_section.get('token'), + enable=ua_section.get('enable')) # vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py index b7cf9bee..8c4161ef 100644 --- a/cloudinit/config/tests/test_ubuntu_advantage.py +++ b/cloudinit/config/tests/test_ubuntu_advantage.py @@ -1,10 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. -import re -from six import StringIO - from cloudinit.config.cc_ubuntu_advantage import ( - handle, maybe_install_ua_tools, run_commands, schema) + configure_ua, handle, maybe_install_ua_tools, schema) from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util from cloudinit.tests.helpers import ( @@ -20,90 +17,120 @@ class FakeCloud(object): self.distro = distro -class TestRunCommands(CiTestCase): +class TestConfigureUA(CiTestCase): with_logs = True allowed_subp = [CiTestCase.SUBP_SHELL_TRUE] def setUp(self): - super(TestRunCommands, self).setUp() + super(TestConfigureUA, self).setUp() self.tmp = self.tmp_dir() @mock.patch('%s.util.subp' % MPATH) - def test_run_commands_on_empty_list(self, m_subp): - """When provided with an empty list, run_commands does nothing.""" - run_commands([]) - self.assertEqual('', self.logs.getvalue()) - m_subp.assert_not_called() - - def test_run_commands_on_non_list_or_dict(self): - """When provided an invalid type, run_commands raises an error.""" - with self.assertRaises(TypeError) as context_manager: - run_commands(commands="I'm Not Valid") + def test_configure_ua_attach_error(self, m_subp): + """Errors from ua attach command are raised.""" + m_subp.side_effect = util.ProcessExecutionError( + 'Invalid token SomeToken') + with self.assertRaises(RuntimeError) as context_manager: + configure_ua(token='SomeToken') self.assertEqual( - "commands parameter was not a list or dict: I'm Not Valid", + 'Failure attaching Ubuntu Advantage:\nUnexpected error while' + ' running command.\nCommand: -\nExit code: -\nReason: -\n' + 'Stdout: Invalid token SomeToken\nStderr: -', str(context_manager.exception)) - def test_run_command_logs_commands_and_exit_codes_to_stderr(self): - """All exit codes are logged to stderr.""" - outfile = self.tmp_path('output.log', dir=self.tmp) - - cmd1 = 'echo "HI" >> %s' % outfile - cmd2 = 'bogus command' - cmd3 = 'echo "MOM" >> %s' % outfile - commands = [cmd1, cmd2, cmd3] - - mock_path = '%s.sys.stderr' % MPATH - with mock.patch(mock_path, new_callable=StringIO) as m_stderr: - with self.assertRaises(RuntimeError) as context_manager: - run_commands(commands=commands) - - self.assertIsNotNone( - re.search(r'bogus: (command )?not found', - str(context_manager.exception)), - msg='Expected bogus command not found') - expected_stderr_log = '\n'.join([ - 'Begin run command: {cmd}'.format(cmd=cmd1), - 'End run command: exit(0)', - 'Begin run command: {cmd}'.format(cmd=cmd2), - 'ERROR: End run command: exit(127)', - 'Begin run command: {cmd}'.format(cmd=cmd3), - 'End run command: exit(0)\n']) - self.assertEqual(expected_stderr_log, m_stderr.getvalue()) - - def test_run_command_as_lists(self): - """When commands are specified as a list, run them in order.""" - outfile = self.tmp_path('output.log', dir=self.tmp) - - cmd1 = 'echo "HI" >> %s' % outfile - cmd2 = 'echo "MOM" >> %s' % outfile - commands = [cmd1, cmd2] - with mock.patch('%s.sys.stderr' 
% MPATH, new_callable=StringIO): - run_commands(commands=commands) + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_token(self, m_subp): + """When token is provided, attach the machine to ua using the token.""" + configure_ua(token='SomeToken') + m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) + self.assertEqual( + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) + + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_on_service_error(self, m_subp): + """all services should be enabled and then any failures raised""" + def fake_subp(cmd, capture=None): + fail_cmds = [['ua', 'enable', svc] for svc in ['esm', 'cc']] + if cmd in fail_cmds and capture: + svc = cmd[-1] + raise util.ProcessExecutionError( + 'Invalid {} credentials'.format(svc.upper())) + + m_subp.side_effect = fake_subp + + with self.assertRaises(RuntimeError) as context_manager: + configure_ua(token='SomeToken', enable=['esm', 'cc', 'fips']) + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken']), + mock.call(['ua', 'enable', 'esm'], capture=True), + mock.call(['ua', 'enable', 'cc'], capture=True), + mock.call(['ua', 'enable', 'fips'], capture=True)]) self.assertIn( - 'DEBUG: Running user-provided ubuntu-advantage commands', + 'WARNING: Failure enabling "esm":\nUnexpected error' + ' while running command.\nCommand: -\nExit code: -\nReason: -\n' + 'Stdout: Invalid ESM credentials\nStderr: -\n', self.logs.getvalue()) - self.assertEqual('HI\nMOM\n', util.load_file(outfile)) self.assertIn( - 'WARNING: Non-ubuntu-advantage commands in ubuntu-advantage' - ' config:', + 'WARNING: Failure enabling "cc":\nUnexpected error' + ' while running command.\nCommand: -\nExit code: -\nReason: -\n' + 'Stdout: Invalid CC credentials\nStderr: -\n', + self.logs.getvalue()) + self.assertEqual( + 'Failure enabling Ubuntu Advantage service(s): "esm", "cc"', + str(context_manager.exception)) + + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_empty_services(self, m_subp): + """When services is an empty list, do not auto-enable attach.""" + configure_ua(token='SomeToken', enable=[]) + m_subp.assert_called_once_with(['ua', 'attach', 'SomeToken']) + self.assertEqual( + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', self.logs.getvalue()) - def test_run_command_dict_sorted_as_command_script(self): - """When commands are a dict, sort them and run.""" - outfile = self.tmp_path('output.log', dir=self.tmp) - cmd1 = 'echo "HI" >> %s' % outfile - cmd2 = 'echo "MOM" >> %s' % outfile - commands = {'02': cmd1, '01': cmd2} - with mock.patch('%s.sys.stderr' % MPATH, new_callable=StringIO): - run_commands(commands=commands) + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_specific_services(self, m_subp): + """When services a list, only enable specific services.""" + configure_ua(token='SomeToken', enable=['fips']) + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken']), + mock.call(['ua', 'enable', 'fips'], capture=True)]) + self.assertEqual( + 'DEBUG: Attaching to Ubuntu Advantage. 
ua attach SomeToken\n', + self.logs.getvalue()) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_string_services(self, m_subp): + """When services a string, treat as singleton list and warn""" + configure_ua(token='SomeToken', enable='fips') + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken']), + mock.call(['ua', 'enable', 'fips'], capture=True)]) + self.assertEqual( + 'WARNING: ubuntu_advantage: enable should be a list, not a' + ' string; treating as a single enable\n' + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) - expected_messages = [ - 'DEBUG: Running user-provided ubuntu-advantage commands'] - for message in expected_messages: - self.assertIn(message, self.logs.getvalue()) - self.assertEqual('MOM\nHI\n', util.load_file(outfile)) + @mock.patch('%s.util.subp' % MPATH) + def test_configure_ua_attach_with_weird_services(self, m_subp): + """When services not string or list, warn but still attach""" + configure_ua(token='SomeToken', enable={'deffo': 'wont work'}) + self.assertEqual( + m_subp.call_args_list, + [mock.call(['ua', 'attach', 'SomeToken'])]) + self.assertEqual( + 'WARNING: ubuntu_advantage: enable should be a list, not a' + ' dict; skipping enabling services\n' + 'DEBUG: Attaching to Ubuntu Advantage. ua attach SomeToken\n', + self.logs.getvalue()) @skipUnlessJsonSchema() @@ -112,90 +139,50 @@ class TestSchema(CiTestCase, SchemaTestCaseMixin): with_logs = True schema = schema - def test_schema_warns_on_ubuntu_advantage_not_as_dict(self): - """If ubuntu-advantage configuration is not a dict, emit a warning.""" - validate_cloudconfig_schema({'ubuntu-advantage': 'wrong type'}, schema) + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_schema_warns_on_ubuntu_advantage_not_dict(self, _cfg, _): + """If ubuntu_advantage configuration is not a dict, emit a warning.""" + validate_cloudconfig_schema({'ubuntu_advantage': 'wrong type'}, schema) self.assertEqual( - "WARNING: Invalid config:\nubuntu-advantage: 'wrong type' is not" + "WARNING: Invalid config:\nubuntu_advantage: 'wrong type' is not" " of type 'object'\n", self.logs.getvalue()) - @mock.patch('%s.run_commands' % MPATH) - def test_schema_disallows_unknown_keys(self, _): - """Unknown keys in ubuntu-advantage configuration emit warnings.""" + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_schema_disallows_unknown_keys(self, _cfg, _): + """Unknown keys in ubuntu_advantage configuration emit warnings.""" validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': ['ls'], 'invalid-key': ''}}, + {'ubuntu_advantage': {'token': 'winner', 'invalid-key': ''}}, schema) self.assertIn( - 'WARNING: Invalid config:\nubuntu-advantage: Additional properties' + 'WARNING: Invalid config:\nubuntu_advantage: Additional properties' " are not allowed ('invalid-key' was unexpected)", self.logs.getvalue()) - def test_warn_schema_requires_commands(self): - """Warn when ubuntu-advantage configuration lacks commands.""" - validate_cloudconfig_schema( - {'ubuntu-advantage': {}}, schema) - self.assertEqual( - "WARNING: Invalid config:\nubuntu-advantage: 'commands' is a" - " required property\n", - self.logs.getvalue()) - - @mock.patch('%s.run_commands' % MPATH) - def test_warn_schema_commands_is_not_list_or_dict(self, _): - """Warn when ubuntu-advantage:commands config is not a 
list or dict.""" + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_warn_schema_requires_token(self, _cfg, _): + """Warn if ubuntu_advantage configuration lacks token.""" validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': 'broken'}}, schema) + {'ubuntu_advantage': {'enable': ['esm']}}, schema) self.assertEqual( - "WARNING: Invalid config:\nubuntu-advantage.commands: 'broken' is" - " not of type 'object', 'array'\n", - self.logs.getvalue()) + "WARNING: Invalid config:\nubuntu_advantage:" + " 'token' is a required property\n", self.logs.getvalue()) - @mock.patch('%s.run_commands' % MPATH) - def test_warn_schema_when_commands_is_empty(self, _): - """Emit warnings when ubuntu-advantage:commands is empty.""" - validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': []}}, schema) + @mock.patch('%s.maybe_install_ua_tools' % MPATH) + @mock.patch('%s.configure_ua' % MPATH) + def test_warn_schema_services_is_not_list_or_dict(self, _cfg, _): + """Warn when ubuntu_advantage:enable config is not a list.""" validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': {}}}, schema) + {'ubuntu_advantage': {'enable': 'needslist'}}, schema) self.assertEqual( - "WARNING: Invalid config:\nubuntu-advantage.commands: [] is too" - " short\nWARNING: Invalid config:\nubuntu-advantage.commands: {}" - " does not have enough properties\n", + "WARNING: Invalid config:\nubuntu_advantage: 'token' is a" + " required property\nubuntu_advantage.enable: 'needslist'" + " is not of type 'array'\n", self.logs.getvalue()) - @mock.patch('%s.run_commands' % MPATH) - def test_schema_when_commands_are_list_or_dict(self, _): - """No warnings when ubuntu-advantage:commands are a list or dict.""" - validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': ['valid']}}, schema) - validate_cloudconfig_schema( - {'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema) - self.assertEqual('', self.logs.getvalue()) - - def test_duplicates_are_fine_array_array(self): - """Duplicated commands array/array entries are allowed.""" - self.assertSchemaValid( - {'commands': [["echo", "bye"], ["echo" "bye"]]}, - "command entries can be duplicate.") - - def test_duplicates_are_fine_array_string(self): - """Duplicated commands array/string entries are allowed.""" - self.assertSchemaValid( - {'commands': ["echo bye", "echo bye"]}, - "command entries can be duplicate.") - - def test_duplicates_are_fine_dict_array(self): - """Duplicated commands dict/array entries are allowed.""" - self.assertSchemaValid( - {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}}, - "command entries can be duplicate.") - - def test_duplicates_are_fine_dict_string(self): - """Duplicated commands dict/string entries are allowed.""" - self.assertSchemaValid( - {'commands': {'00': "echo bye", '01': "echo bye"}}, - "command entries can be duplicate.") - class TestHandle(CiTestCase): @@ -205,41 +192,89 @@ class TestHandle(CiTestCase): super(TestHandle, self).setUp() self.tmp = self.tmp_dir() - @mock.patch('%s.run_commands' % MPATH) @mock.patch('%s.validate_cloudconfig_schema' % MPATH) - def test_handle_no_config(self, m_schema, m_run): + def test_handle_no_config(self, m_schema): """When no ua-related configuration is provided, nothing happens.""" cfg = {} handle('ua-test', cfg=cfg, cloud=None, log=self.logger, args=None) self.assertIn( - "DEBUG: Skipping module named ua-test, no 'ubuntu-advantage' key" - " in config", + "DEBUG: Skipping module named ua-test, no 
'ubuntu_advantage'" + ' configuration found', self.logs.getvalue()) m_schema.assert_not_called() - m_run.assert_not_called() + @mock.patch('%s.configure_ua' % MPATH) @mock.patch('%s.maybe_install_ua_tools' % MPATH) - def test_handle_tries_to_install_ubuntu_advantage_tools(self, m_install): + def test_handle_tries_to_install_ubuntu_advantage_tools( + self, m_install, m_cfg): """If ubuntu_advantage is provided, try installing ua-tools package.""" - cfg = {'ubuntu-advantage': {}} + cfg = {'ubuntu_advantage': {'token': 'valid'}} mycloud = FakeCloud(None) handle('nomatter', cfg=cfg, cloud=mycloud, log=self.logger, args=None) m_install.assert_called_once_with(mycloud) + @mock.patch('%s.configure_ua' % MPATH) @mock.patch('%s.maybe_install_ua_tools' % MPATH) - def test_handle_runs_commands_provided(self, m_install): - """When commands are specified as a list, run them.""" - outfile = self.tmp_path('output.log', dir=self.tmp) + def test_handle_passes_credentials_and_services_to_configure_ua( + self, m_install, m_configure_ua): + """All ubuntu_advantage config keys are passed to configure_ua.""" + cfg = {'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}} + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) + + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.configure_ua' % MPATH) + def test_handle_warns_on_deprecated_ubuntu_advantage_key_w_config( + self, m_configure_ua): + """Warning when ubuntu-advantage key is present with new config""" + cfg = {'ubuntu-advantage': {'token': 'token', 'enable': ['esm']}} + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'WARNING: Deprecated configuration key "ubuntu-advantage"' + ' provided. Expected underscore delimited "ubuntu_advantage";' + ' will attempt to continue.', + self.logs.getvalue().splitlines()[0]) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) + + def test_handle_error_on_deprecated_commands_key_dashed(self): + """Error when commands is present in ubuntu-advantage key.""" + cfg = {'ubuntu-advantage': {'commands': 'nogo'}} + with self.assertRaises(RuntimeError) as context_manager: + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' + ' Expected "token"', + str(context_manager.exception)) + + def test_handle_error_on_deprecated_commands_key_underscored(self): + """Error when commands is present in ubuntu_advantage key.""" + cfg = {'ubuntu_advantage': {'commands': 'nogo'}} + with self.assertRaises(RuntimeError) as context_manager: + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'Deprecated configuration "ubuntu-advantage: commands" provided.' 
+ ' Expected "token"', + str(context_manager.exception)) + @mock.patch('%s.maybe_install_ua_tools' % MPATH, mock.MagicMock()) + @mock.patch('%s.configure_ua' % MPATH) + def test_handle_prefers_new_style_config( + self, m_configure_ua): + """ubuntu_advantage should be preferred over ubuntu-advantage""" cfg = { - 'ubuntu-advantage': {'commands': ['echo "HI" >> %s' % outfile, - 'echo "MOM" >> %s' % outfile]}} - mock_path = '%s.sys.stderr' % MPATH - with self.allow_subp([CiTestCase.SUBP_SHELL_TRUE]): - with mock.patch(mock_path, new_callable=StringIO): - handle('nomatter', cfg=cfg, cloud=None, log=self.logger, - args=None) - self.assertEqual('HI\nMOM\n', util.load_file(outfile)) + 'ubuntu-advantage': {'token': 'nope', 'enable': ['wrong']}, + 'ubuntu_advantage': {'token': 'token', 'enable': ['esm']}, + } + handle('nomatter', cfg=cfg, cloud=None, log=self.logger, args=None) + self.assertEqual( + 'WARNING: Deprecated configuration key "ubuntu-advantage"' + ' provided. Expected underscore delimited "ubuntu_advantage";' + ' will attempt to continue.', + self.logs.getvalue().splitlines()[0]) + m_configure_ua.assert_called_once_with( + token='token', enable=['esm']) class TestMaybeInstallUATools(CiTestCase): @@ -253,7 +288,7 @@ class TestMaybeInstallUATools(CiTestCase): @mock.patch('%s.util.which' % MPATH) def test_maybe_install_ua_tools_noop_when_ua_tools_present(self, m_which): """Do nothing if ubuntu-advantage-tools already exists.""" - m_which.return_value = '/usr/bin/ubuntu-advantage' # already installed + m_which.return_value = '/usr/bin/ua' # already installed distro = mock.MagicMock() distro.update_package_sources.side_effect = RuntimeError( 'Some apt error') -- cgit v1.2.3 From bdd9c0ac9bcd68ec1ac3b2038dad0ba3dbd83341 Mon Sep 17 00:00:00 2001 From: Antonio Romito Date: Tue, 9 Apr 2019 14:54:23 +0000 Subject: cmd:main.py: Fix missing 'modules-init' key in modes dict Cloud-init's main.py will fail when presented with a new stage name 'modules-init' if upgrading an older cloud-init. Fix this by initializing unknown stage names before accessing. LP: #1815109 --- cloudinit/cmd/main.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 933c019a..a5446da7 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -632,13 +632,14 @@ def status_wrapper(name, args, data_d=None, link_d=None): 'start': None, 'finished': None, } + if status is None: status = {'v1': {}} - for m in modes: - status['v1'][m] = nullstatus.copy() status['v1']['datasource'] = None - elif mode not in status['v1']: - status['v1'][mode] = nullstatus.copy() + + for m in modes: + if m not in status['v1']: + status['v1'][m] = nullstatus.copy() v1 = status['v1'] v1['stage'] = mode -- cgit v1.2.3 From b76714c355a87416f9f07156b0f025aceaca7296 Mon Sep 17 00:00:00 2001 From: Risto Oikarinen Date: Tue, 9 Apr 2019 18:05:24 +0000 Subject: Change DataSourceNoCloud to ignore file system label's case. NoCloud data source now accepts both 'cidata' and 'CIDATA' as filesystem labels. This is similar to DataSourceConfigDrive's support for 'config-2' and 'CONFIG-2'. 
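For illustration, the case-insensitive lookup this change introduces amounts to querying blkid for both spellings of the label and unioning the results. A minimal standalone sketch (devs_with_label is a hypothetical name; find_devs_with stands in for cloud-init's util.find_devs_with helper):

    def devs_with_label(label, find_devs_with):
        """Return devices whose filesystem label matches either case."""
        devs = list(find_devs_with("LABEL=%s" % label.upper()))
        devs.extend(find_devs_with("LABEL=%s" % label.lower()))
        # de-duplicate, then sort the way the patch below does
        return sorted(set(devs), reverse=True)

With the default fs_label of "cidata", this matches volumes labelled either "cidata" or "CIDATA", mirroring the DataSourceNoCloud change below.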
--- cloudinit/sources/DataSourceNoCloud.py | 4 ++- doc/rtd/topics/datasources/nocloud.rst | 2 +- tests/unittests/test_datasource/test_nocloud.py | 42 +++++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 17 ++++++++++ tools/ds-identify | 7 +++-- 5 files changed, 67 insertions(+), 5 deletions(-) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 6860f0cc..fcf5d589 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -106,7 +106,9 @@ class DataSourceNoCloud(sources.DataSource): fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) - label_list = util.find_devs_with("LABEL=%s" % label) + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + devlist = list(set(fslist) & set(label_list)) devlist.sort(reverse=True) diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index 08578e86..1c5cf961 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -9,7 +9,7 @@ network at all). You can provide meta-data and user-data to a local vm boot via files on a `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be -``cidata``. +``cidata`` or ``CIDATA``. Alternatively, you can provide meta-data via kernel command line or SMBIOS "serial number" option. The data must be passed in the form of a string: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 3429272c..b785362f 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'read_dmi_data', return_value=None)) + def _test_fs_config_is_read(self, fs_label, fs_label_to_search): + vfat_device = 'device-1' + + def m_mount_cb(device, callback, mtype): + if (device == vfat_device): + return {'meta-data': yaml.dump({'instance-id': 'IID'})} + else: + return {} + + def m_find_devs_with(query='', path=''): + if 'TYPE=vfat' == query: + return [vfat_device] + elif 'LABEL={}'.format(fs_label) == query: + return [vfat_device] + else: + return [] + + self.mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + side_effect=m_find_devs_with)) + self.mocks.enter_context( + mock.patch.object(util, 'mount_cb', + side_effect=m_mount_cb)) + sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): md = {'instance-id': 'IID', 'dsmode': 'local'} ud = b"USER_DATA_HERE" @@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase): ret = dsrc.get_data() self.assertFalse(ret) + def test_fs_config_lowercase_label(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'cidata') + + def test_fs_config_uppercase_label(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'cidata') + + def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'CIDATA') + + def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'CIDATA') + def test_no_datasource_expected(self, m_is_lxd): # no source 
should be found if no cmdline, config, and fs_label=None sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index d00c1b4b..8c18aa1a 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -520,6 +520,10 @@ class TestDsIdentify(DsIdentifyBase): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" self._test_ds_found('NoCloud') + def test_nocloud_upper(self): + """NoCloud is found with uppercase filesystem label.""" + self._test_ds_found('NoCloudUpper') + def test_nocloud_seed(self): """Nocloud seed directory.""" self._test_ds_found('NoCloud-seed') @@ -713,6 +717,19 @@ VALID_CFG = { 'dev/vdb': 'pretend iso content for cidata\n', } }, + 'NoCloudUpper': { + 'ds': 'NoCloud', + 'mocks': [ + MOCK_VIRT_IS_KVM, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + BLKID_UEFI_UBUNTU + + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, + ], + 'files': { + 'dev/vdb': 'pretend iso content for cidata\n', + } + }, 'NoCloud-seed': { 'ds': 'NoCloud', 'files': { diff --git a/tools/ds-identify b/tools/ds-identify index b78b2731..6518901e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -620,7 +620,7 @@ dscheck_MAAS() { } dscheck_NoCloud() { - local fslabel="cidata" d="" + local fslabel="cidata CIDATA" d="" case " ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac @@ -632,9 +632,10 @@ dscheck_NoCloud() { check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done - if has_fs_with_label "${fslabel}"; then + if has_fs_with_label $fslabel; then return ${DS_FOUND} fi + return ${DS_NOT_FOUND} } @@ -762,7 +763,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From 6322c2ddf4b68a8e7cc467a07fb20a1d151a2ef3 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 10 Apr 2019 20:21:37 +0000 Subject: Revert "DataSource: move update_events from a class to an instance..." Moving update_events from a class attribute to an instance attribute means that it doesn't exist on DataSource objects that are unpickled, causing tracebacks on cloud-init upgrade. As this change is only required for cloud-init installations which don't utilise ds-identify, we're backing it out to be reintroduced once the upgrade path bug has been addressed. This reverts commit f2fd6eac4407e60d0e98826ab03847dda4cde138. 
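The upgrade failure described above can be reproduced with plain pickle semantics. A minimal standalone sketch (toy classes, not cloud-init's real ones): unpickling re-creates an object from __new__ plus its saved __dict__ and never re-runs __init__, so attributes assigned only in __init__ are missing from objects pickled by older code, while class attributes remain reachable through the class.

    import pickle

    class ClassAttr(object):
        update_events = {'network': {'boot-new-instance'}}  # on the class

    class InstanceAttr(object):
        def __init__(self):
            self.update_events = {'network': {'boot-new-instance'}}

    # Simulate objects pickled before the attribute existed: created via
    # __new__ with an empty __dict__, so __init__ never runs on unpickle.
    old = pickle.loads(pickle.dumps(ClassAttr.__new__(ClassAttr)))
    print(old.update_events)                 # OK: falls back to the class

    stale = pickle.loads(pickle.dumps(InstanceAttr.__new__(InstanceAttr)))
    print(hasattr(stale, 'update_events'))   # False: __init__ was never run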
--- cloudinit/sources/DataSourceScaleway.py | 3 +-- cloudinit/sources/__init__.py | 6 +++--- cloudinit/sources/tests/test_init.py | 15 --------------- tests/unittests/test_datasource/test_scaleway.py | 7 ------- 4 files changed, 4 insertions(+), 27 deletions(-) diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 54bfc1fe..b573b382 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -171,11 +171,10 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" + update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) - self.update_events = { - 'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}} self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 1604932d..e6966b31 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -164,6 +164,9 @@ class DataSource(object): # A datasource which supports writing network config on each system boot # would call update_events['network'].add(EventType.BOOT). + # Default: generate network config on new instance id (first boot). + update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} + # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute # values are reset via clear_cached_attrs during any update_metadata call. @@ -188,9 +191,6 @@ class DataSource(object): self.vendordata = None self.vendordata_raw = None - # Default: generate network config on new instance id (first boot). 
- self.update_events = {'network': {EventType.BOOT_NEW_INSTANCE}} - self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) if not self.ds_cfg: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index cb1912be..6378e98b 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -575,21 +575,6 @@ class TestDataSource(CiTestCase): " events: New instance first boot", self.logs.getvalue()) - def test_data_sources_cant_mutate_update_events_for_others(self): - """update_events shouldn't be changed for other DSes (LP: #1819913)""" - - class ModifyingDS(DataSource): - - def __init__(self, sys_cfg, distro, paths): - # This mirrors what DataSourceAzure does which causes LP: - # #1819913 - DataSource.__init__(self, sys_cfg, distro, paths) - self.update_events['network'].add(EventType.BOOT) - - before_update_events = copy.deepcopy(self.datasource.update_events) - ModifyingDS(self.sys_cfg, self.distro, self.paths) - self.assertEqual(before_update_events, self.datasource.update_events) - class TestRedactSensitiveData(CiTestCase): diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 3bfd7527..f96bf0a2 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -7,7 +7,6 @@ import requests from cloudinit import helpers from cloudinit import settings -from cloudinit.event import EventType from cloudinit.sources import DataSourceScaleway from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase @@ -404,9 +403,3 @@ class TestDataSourceScaleway(HttprettyTestCase): netcfg = self.datasource.network_config self.assertEqual(netcfg, '0xdeadbeef') - - def test_update_events_is_correct(self): - """ensure update_events contains correct data""" - self.assertEqual( - {'network': {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}, - self.datasource.update_events) -- cgit v1.2.3 From 9fc682c9ebbccab5e958eb882636d969be88beb9 Mon Sep 17 00:00:00 2001 From: Dominic Schlegel Date: Wed, 17 Apr 2019 14:43:47 +0000 Subject: cc_apt_configure: fix typo in apt documentation --- cloudinit/config/cc_apt_configure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index e18944ec..919d1995 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -127,7 +127,7 @@ to ``^[\\w-]+:\\w`` Source list entries can be specified as a dictionary under the ``sources`` config key, with key in the dict representing a different source file. The key -The key of each source entry will be used as an id that can be referenced in +of each source entry will be used as an id that can be referenced in other config entries, as well as the filename for the source's configuration under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, it will be appended. If there is no configuration for a key in ``sources``, no -- cgit v1.2.3 From 937555fd422edf8235430afab3c0ab69f9e3b3a4 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Thu, 18 Apr 2019 16:08:20 +0000 Subject: mount_cb: do not pass sync and rw options to mount On FreeBSD, mount_cd9660 does not accept the sync option that is enabled by default. In addition, the sync is only useful with the `rw` mode. However the `rw` mode was never used. 
This patch removes the `rw` and `sync` parameters of `mount_cb` to simplify the code base and resolve the FreeBSD issue. LP: #1645824 --- cloudinit/sources/DataSourceAzure.py | 2 +- cloudinit/sources/DataSourceConfigDrive.py | 7 ++----- cloudinit/util.py | 15 ++------------- 3 files changed, 5 insertions(+), 19 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 76b16616..64165259 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -407,7 +407,7 @@ class DataSourceAzure(sources.DataSource): elif cdev.startswith("/dev/"): if util.is_FreeBSD(): ret = util.mount_cb(cdev, load_azure_ds_dir, - mtype="udf", sync=False) + mtype="udf") else: ret = util.mount_cb(cdev, load_azure_ds_dir) else: diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 564e3eb3..571d30dc 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -72,15 +72,12 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): dslist = self.sys_cfg.get('datasource_list') for dev in find_candidate_devs(dslist=dslist): try: - # Set mtype if freebsd and turn off sync - if dev.startswith("/dev/cd"): + if util.is_FreeBSD() and dev.startswith("/dev/cd"): mtype = "cd9660" - sync = False else: mtype = None - sync = True results = util.mount_cb(dev, read_config_drive, - mtype=mtype, sync=sync) + mtype=mtype) found = dev except openstack.NonReadable: pass diff --git a/cloudinit/util.py b/cloudinit/util.py index 385f231c..ea4199cd 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1679,7 +1679,7 @@ def mounts(): return mounted -def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, +def mount_cb(device, callback, data=None, mtype=None, update_env_for_mount=None): """ Mount the device, call method 'callback' passing the directory @@ -1726,18 +1726,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True, for mtype in mtypes: mountpoint = None try: - mountcmd = ['mount'] - mountopts = [] - if rw: - mountopts.append('rw') - else: - mountopts.append('ro') - if sync: - # This seems like the safe approach to do - # (ie where this is on by default) - mountopts.append("sync") - if mountopts: - mountcmd.extend(["-o", ",".join(mountopts)]) + mountcmd = ['mount', '-o', 'ro'] if mtype: mountcmd.extend(['-t', mtype]) mountcmd.append(device) -- cgit v1.2.3 From 947d3c20891815f164f4c7a8884d1f02ae4a9c5b Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 18 Apr 2019 19:05:33 +0000 Subject: setup.py: install bash completion script in new location Per lintian, this is the path at which bash completion scripts should now be installed.
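For context, a data_files entry in setup.py is a (target-directory, [source-files]) pair, so the relocation below is purely a path change. A sketch of the shape (USR and ETC mirror the prefix variables setup.py already uses):

    USR = '/usr'
    data_files = [
        # old location, flagged by lintian:
        #   (ETC + '/bash_completion.d', ['bash_completion/cloud-init']),
        (USR + '/share/bash-completion/completions',
         ['bash_completion/cloud-init']),
    ]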
--- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 186e215f..fcaf26ff 100755 --- a/setup.py +++ b/setup.py @@ -245,13 +245,14 @@ if not in_virtualenv(): INITSYS_ROOTS[k] = "/" + INITSYS_ROOTS[k] data_files = [ - (ETC + '/bash_completion.d', ['bash_completion/cloud-init']), (ETC + '/cloud', [render_tmpl("config/cloud.cfg.tmpl")]), (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')), (ETC + '/cloud/templates', glob('templates/*')), (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', 'tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), + (USR + '/share/bash-completion/completions', + ['bash_completion/cloud-init']), (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), (USR + '/share/doc/cloud-init/examples', [f for f in glob('doc/examples/*') if is_f(f)]), -- cgit v1.2.3 From c8c32515778983d244126d4e359be9e91b3ce9e5 Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Thu, 18 Apr 2019 21:23:36 +0000 Subject: test_azure: mock util.SeLinuxGuard where needed Mock util.SeLinuxGuard to do nothing within tests that mock functions used by the guard, when those mocks confuse the guard. This has no impact when executing unit tests on systems which do not enable selinux (e.g. Ubuntu). LP: #1825253 --- tests/unittests/test_datasource/test_azure.py | 3 +++ tests/unittests/test_net.py | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 53c56cd0..ab77c034 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1375,12 +1375,15 @@ class TestCanDevBeReformatted(CiTestCase): self._domock(p + "util.mount_cb", 'm_mount_cb') self._domock(p + "os.path.realpath", 'm_realpath') self._domock(p + "os.path.exists", 'm_exists') + self._domock(p + "util.SeLinuxGuard", 'm_selguard') self.m_exists.side_effect = lambda p: p in bypath self.m_realpath.side_effect = realpath self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs self.m_mount_cb.side_effect = mount_cb self.m_partitions_on_device.side_effect = partitions_on_device + self.m_selguard.__enter__ = mock.Mock(return_value=False) + self.m_selguard.__exit__ = mock.Mock() def test_three_partitions_is_false(self): """A disk with 3 partitions can not be formatted.""" diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index fd03deb6..ca6ef97d 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3269,9 +3269,12 @@ class TestNetplanPostcommands(CiTestCase): mock_netplan_generate.assert_called_with(run=True) mock_net_setup_link.assert_called_with(run=True) + @mock.patch('cloudinit.util.SeLinuxGuard') @mock.patch.object(netplan, "get_devicelist") @mock.patch('cloudinit.util.subp') - def test_netplan_postcmds(self, mock_subp, mock_devlist): + def test_netplan_postcmds(self, mock_subp, mock_devlist, mock_sel): + mock_sel.__enter__ = mock.Mock(return_value=False) + mock_sel.__exit__ = mock.Mock() mock_devlist.side_effect = [['lo']] tmp_dir = self.tmp_dir() ns = network_state.parse_net_config_data(self.mycfg, -- cgit v1.2.3 From 69251d5b4431204c1e768fdf6e9f268edceb2e96 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 19 Apr 2019 21:38:44 +0000 Subject: packages: update rpm specs for new bash completion path LP: #1825444 --- packages/redhat/cloud-init.spec.in | 4 +++- packages/suse/cloud-init.spec.in | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff 
--git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 6b2022ba..057a5784 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -205,7 +205,9 @@ fi %dir %{_sysconfdir}/cloud/templates %config(noreplace) %{_sysconfdir}/cloud/templates/* %config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf -%{_sysconfdir}/bash_completion.d/cloud-init + +# Bash completion script +%{_datadir}/bash-completion/completions/cloud-init %{_libexecdir}/%{name} %dir %{_sharedstatedir}/cloud diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in index 26894b34..004b875f 100644 --- a/packages/suse/cloud-init.spec.in +++ b/packages/suse/cloud-init.spec.in @@ -120,7 +120,9 @@ version_pys=$(cd "%{buildroot}" && find . -name version.py -type f) %config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README %dir %{_sysconfdir}/cloud/templates %config(noreplace) %{_sysconfdir}/cloud/templates/* -%{_sysconfdir}/bash_completion.d/cloud-init + +# Bash completion script +%{_datadir}/bash-completion/completions/cloud-init %{_sysconfdir}/dhcp/dhclient-exit-hooks.d/hook-dhclient %{_sysconfdir}/NetworkManager/dispatcher.d/hook-network-manager -- cgit v1.2.3 From 5de83fc54c17b504842a924e7db08e8c2c1cebf9 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Mon, 22 Apr 2019 22:46:40 +0000 Subject: net/sysconfig: only indicate available on known sysconfig distros Restrict the sysconfig renderer availabily to known distros. Ubuntu/Debian systems may include network-manager but they do not have support for reading sysconfig network output; that is enabled via a Network-Manager plugin: ifcfg-rh which is not available in Ubuntu/Debian. LP: #1819994 --- cloudinit/net/sysconfig.py | 6 ++++-- tests/unittests/test_net.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 09983929..a47da0a8 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -18,6 +18,8 @@ from .network_state import ( LOG = logging.getLogger(__name__) NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" +KNOWN_DISTROS = [ + 'opensuse', 'sles', 'suse', 'redhat', 'fedora', 'centos'] def _make_header(sep='#'): @@ -717,8 +719,8 @@ class Renderer(renderer.Renderer): def available(target=None): sysconfig = available_sysconfig(target=target) nm = available_nm(target=target) - - return any([nm, sysconfig]) + return (util.get_linux_distro()[0] in KNOWN_DISTROS + and any([nm, sysconfig])) def available_sysconfig(target=None): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index ca6ef97d..9db01567 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -3829,6 +3829,41 @@ class TestNetRenderers(CiTestCase): self.assertRaises(net.RendererNotFoundError, renderers.select, priority=['sysconfig', 'eni']) + @mock.patch("cloudinit.net.renderers.netplan.available") + @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig") + @mock.patch("cloudinit.net.renderers.sysconfig.available_nm") + @mock.patch("cloudinit.net.renderers.eni.available") + @mock.patch("cloudinit.net.renderers.sysconfig.util.get_linux_distro") + def test_sysconfig_selected_on_sysconfig_enabled_distros(self, m_distro, + m_eni, m_sys_nm, + m_sys_scfg, + m_netplan): + """sysconfig only selected on specific distros (rhel/sles).""" + + # Ubuntu with Network-Manager installed + m_eni.return_value = False # no ifupdown (ifquery) + 
m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown + m_sys_nm.return_value = True # network-manager is installed + m_netplan.return_value = True # netplan is installed + m_distro.return_value = ('ubuntu', None, None) + self.assertEqual('netplan', renderers.select(priority=None)[0]) + + # Centos with Network-Manager installed + m_eni.return_value = False # no ifupdown (ifquery) + m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown + m_sys_nm.return_value = True # network-manager is installed + m_netplan.return_value = False # netplan is not installed + m_distro.return_value = ('centos', None, None) + self.assertEqual('sysconfig', renderers.select(priority=None)[0]) + + # OpenSuse with Network-Manager installed + m_eni.return_value = False # no ifupdown (ifquery) + m_sys_scfg.return_value = False # no sysconfig/ifup/ifdown + m_sys_nm.return_value = True # network-manager is installed + m_netplan.return_value = False # netplan is not installed + m_distro.return_value = ('opensuse', None, None) + self.assertEqual('sysconfig', renderers.select(priority=None)[0]) + class TestGetInterfaces(CiTestCase): _data = {'bonds': ['bond1'], -- cgit v1.2.3 From 3fb55ea85139f2d29ce32f124d099419fbd06f60 Mon Sep 17 00:00:00 2001 From: Chad Miller Date: Tue, 23 Apr 2019 17:07:39 +0000 Subject: tools/read-version: handle errors When the cloned branch was not the canonical upstream and tags were not available, tox would fail because tools/read-version would fail, and tragically never print the advice that is in tools/read-version about how to fix it. This changes tools/read-version to catch the exception that is elsewhere explicitly thrown and treat that too as an error it can handle. --- tools/read-version | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/read-version b/tools/read-version index e69c2ce0..6dca659e 100755 --- a/tools/read-version +++ b/tools/read-version @@ -71,9 +71,12 @@ if is_gitdir(_tdir) and which("git"): flags = ['--tags'] cmd = ['git', 'describe', '--abbrev=8', '--match=[0-9]*'] + flags - version = tiny_p(cmd).strip() + try: + version = tiny_p(cmd).strip() + except RuntimeError: + version = None - if not version.startswith(src_version): + if version is None or not version.startswith(src_version): sys.stderr.write("git describe version (%s) differs from " "cloudinit.version (%s)\n" % (version, src_version)) sys.stderr.write( -- cgit v1.2.3 From 784d3300f213c78d197a7ac8ad42cb098fd82356 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 27 Apr 2019 02:40:47 +0000 Subject: git tests: no longer show warning about safe yaml. Currently on 18.04, running tox -e py27 will spew errors like: .tests/unittests/test_net.py:2649: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details. The change here just uses cloud-init's yaml, which does safeloading by default. 
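As a side note on the mechanics: PyYAML's safe_load() restricts construction to plain Python types and never emits YAMLLoadWarning, which is why routing imports through cloud-init's safeyaml wrapper silences the message. A minimal illustration using the standard PyYAML API (not cloud-init code):

    import yaml

    doc = "network:\n  version: 2\n"
    data = yaml.safe_load(doc)   # SafeLoader: no warning, plain types only
    assert data == {'network': {'version': 2}}
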
--- cloudinit/net/tests/test_init.py | 2 +- tests/unittests/test_net.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index f55c31e8..6d2affe7 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -7,11 +7,11 @@ import mock import os import requests import textwrap -import yaml import cloudinit.net as net from cloudinit.util import ensure_file, write_file, ProcessExecutionError from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase +from cloudinit import safeyaml as yaml class TestSysDevPath(CiTestCase): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 9db01567..e85e9640 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -9,6 +9,7 @@ from cloudinit.net import ( from cloudinit.sources.helpers import openstack from cloudinit import temp_utils from cloudinit import util +from cloudinit import safeyaml as yaml from cloudinit.tests.helpers import ( CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir) @@ -21,7 +22,7 @@ import json import os import re import textwrap -import yaml +from yaml.serializer import Serializer DHCP_CONTENT_1 = """ @@ -3575,7 +3576,7 @@ class TestNetplanRoundTrip(CiTestCase): # now look for any alias, avoid rendering them entirely # generate the first anchor string using the template # as of this writing, looks like "&id001" - anchor = r'&' + yaml.serializer.Serializer.ANCHOR_TEMPLATE % 1 + anchor = r'&' + Serializer.ANCHOR_TEMPLATE % 1 found_alias = re.search(anchor, content, re.MULTILINE) if found_alias: msg = "Error at: %s\nContent:\n%s" % (found_alias, content) -- cgit v1.2.3 From 86674f013dfcea3c075ab41373ffb475881066f6 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Mon, 29 Apr 2019 20:22:16 +0000 Subject: Azure: Changes to the Hyper-V KVP Reporter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit  + Truncate KVP Pool file to prevent stale entries from being processed by the Hyper-V KVP reporter.  + Drop filtering of KVPs as it is no longer needed.  + Batch appending of existing KVP entries. 
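The truncation logic boils down to comparing the pool file's mtime against boot time; a condensed sketch of that check follows (the real handler in the diff below also latches a class-level flag so the truncation runs at most once per process):

    import os
    import time

    def truncate_if_stale(kvp_file, uptime_seconds):
        # File untouched since boot => contents predate this boot => stale.
        boot_time = time.time() - uptime_seconds
        try:
            if os.path.getmtime(kvp_file) < boot_time:
                open(kvp_file, "w").close()   # empty the pool file
        except OSError:
            pass   # KVP is best-effort telemetry; never fail boot over it
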
--- cloudinit/reporting/handlers.py | 117 +++++++++++++++---------------- tests/unittests/test_reporting_hyperv.py | 104 +++++++++++++-------------- 2 files changed, 106 insertions(+), 115 deletions(-) mode change 100644 => 100755 cloudinit/reporting/handlers.py mode change 100644 => 100755 tests/unittests/test_reporting_hyperv.py diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py old mode 100644 new mode 100755 index 6d23558e..10165aec --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -5,7 +5,6 @@ import fcntl import json import six import os -import re import struct import threading import time @@ -14,6 +13,7 @@ from cloudinit import log as logging from cloudinit.registry import DictRegistry from cloudinit import (url_helper, util) from datetime import datetime +from six.moves.queue import Empty as QueueEmptyError if six.PY2: from multiprocessing.queues import JoinableQueue as JQueue @@ -129,24 +129,50 @@ class HyperVKvpReportingHandler(ReportingHandler): DESC_IDX_KEY = 'msg_i' JSON_SEPARATORS = (',', ':') KVP_POOL_FILE_GUEST = '/var/lib/hyperv/.kvp_pool_1' + _already_truncated_pool_file = False def __init__(self, kvp_file_path=KVP_POOL_FILE_GUEST, event_types=None): super(HyperVKvpReportingHandler, self).__init__() self._kvp_file_path = kvp_file_path + HyperVKvpReportingHandler._truncate_guest_pool_file( + self._kvp_file_path) + self._event_types = event_types self.q = JQueue() - self.kvp_file = None self.incarnation_no = self._get_incarnation_no() self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, self.incarnation_no) - self._current_offset = 0 self.publish_thread = threading.Thread( target=self._publish_event_routine) self.publish_thread.daemon = True self.publish_thread.start() + @classmethod + def _truncate_guest_pool_file(cls, kvp_file): + """ + Truncate the pool file if it has not been truncated since boot. + This should be done exactly once for the file indicated by + KVP_POOL_FILE_GUEST constant above. This method takes a filename + so that we can use an arbitrary file during unit testing. + Since KVP is a best-effort telemetry channel we only attempt to + truncate the file once and only if the file has not been modified + since boot. Additional truncation can lead to loss of existing + KVPs. + """ + if cls._already_truncated_pool_file: + return + boot_time = time.time() - float(util.uptime()) + try: + if os.path.getmtime(kvp_file) < boot_time: + with open(kvp_file, "w"): + pass + except (OSError, IOError) as e: + LOG.warning("failed to truncate kvp pool file, %s", e) + finally: + cls._already_truncated_pool_file = True + def _get_incarnation_no(self): """ use the time passed as the incarnation number. 
@@ -162,20 +188,15 @@ class HyperVKvpReportingHandler(ReportingHandler): def _iterate_kvps(self, offset): """iterate the kvp file from the current offset.""" - try: - with open(self._kvp_file_path, 'rb+') as f: - self.kvp_file = f - fcntl.flock(f, fcntl.LOCK_EX) - f.seek(offset) + with open(self._kvp_file_path, 'rb') as f: + fcntl.flock(f, fcntl.LOCK_EX) + f.seek(offset) + record_data = f.read(self.HV_KVP_RECORD_SIZE) + while len(record_data) == self.HV_KVP_RECORD_SIZE: + kvp_item = self._decode_kvp_item(record_data) + yield kvp_item record_data = f.read(self.HV_KVP_RECORD_SIZE) - while len(record_data) == self.HV_KVP_RECORD_SIZE: - self._current_offset += self.HV_KVP_RECORD_SIZE - kvp_item = self._decode_kvp_item(record_data) - yield kvp_item - record_data = f.read(self.HV_KVP_RECORD_SIZE) - fcntl.flock(f, fcntl.LOCK_UN) - finally: - self.kvp_file = None + fcntl.flock(f, fcntl.LOCK_UN) def _event_key(self, event): """ @@ -207,23 +228,13 @@ class HyperVKvpReportingHandler(ReportingHandler): return {'key': k, 'value': v} - def _update_kvp_item(self, record_data): - if self.kvp_file is None: - raise ReportException( - "kvp file '{0}' not opened." - .format(self._kvp_file_path)) - self.kvp_file.seek(-self.HV_KVP_RECORD_SIZE, 1) - self.kvp_file.write(record_data) - def _append_kvp_item(self, record_data): - with open(self._kvp_file_path, 'rb+') as f: + with open(self._kvp_file_path, 'ab') as f: fcntl.flock(f, fcntl.LOCK_EX) - # seek to end of the file - f.seek(0, 2) - f.write(record_data) + for data in record_data: + f.write(data) f.flush() fcntl.flock(f, fcntl.LOCK_UN) - self._current_offset = f.tell() def _break_down(self, key, meta_data, description): del meta_data[self.MSG_KEY] @@ -279,40 +290,26 @@ class HyperVKvpReportingHandler(ReportingHandler): def _publish_event_routine(self): while True: + items_from_queue = 0 try: event = self.q.get(block=True) - need_append = True + items_from_queue += 1 + encoded_data = [] + while event is not None: + encoded_data += self._encode_event(event) + try: + # get all the rest of the events in the queue + event = self.q.get(block=False) + items_from_queue += 1 + except QueueEmptyError: + event = None try: - if not os.path.exists(self._kvp_file_path): - LOG.warning( - "skip writing events %s to %s. file not present.", - event.as_string(), - self._kvp_file_path) - encoded_event = self._encode_event(event) - # for each encoded_event - for encoded_data in (encoded_event): - for kvp in self._iterate_kvps(self._current_offset): - match = ( - re.match( - r"^{0}\|(\d+)\|.+" - .format(self.EVENT_PREFIX), - kvp['key'] - )) - if match: - match_groups = match.groups(0) - if int(match_groups[0]) < self.incarnation_no: - need_append = False - self._update_kvp_item(encoded_data) - continue - if need_append: - self._append_kvp_item(encoded_data) - except IOError as e: - LOG.warning( - "failed posting event to kvp: %s e:%s", - event.as_string(), e) + self._append_kvp_item(encoded_data) + except (OSError, IOError) as e: + LOG.warning("failed posting events to kvp, %s", e) finally: - self.q.task_done() - + for _ in range(items_from_queue): + self.q.task_done() # when main process exits, q.get() will through EOFError # indicating we should exit this thread. except EOFError: @@ -322,7 +319,7 @@ class HyperVKvpReportingHandler(ReportingHandler): # if the kvp pool already contains a chunk of data, # so defer it to another thread. 
def publish_event(self, event): - if (not self._event_types or event.event_type in self._event_types): + if not self._event_types or event.event_type in self._event_types: self.q.put(event) def flush(self): diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py old mode 100644 new mode 100755 index 2e64c6c7..d01ed5b3 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -1,10 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.reporting import events -from cloudinit.reporting import handlers +from cloudinit.reporting.handlers import HyperVKvpReportingHandler import json import os +import struct +import time from cloudinit import util from cloudinit.tests.helpers import CiTestCase @@ -13,7 +15,7 @@ from cloudinit.tests.helpers import CiTestCase class TestKvpEncoding(CiTestCase): def test_encode_decode(self): kvp = {'key': 'key1', 'value': 'value1'} - kvp_reporting = handlers.HyperVKvpReportingHandler() + kvp_reporting = HyperVKvpReportingHandler() data = kvp_reporting._encode_kvp_item(kvp['key'], kvp['value']) self.assertEqual(len(data), kvp_reporting.HV_KVP_RECORD_SIZE) decoded_kvp = kvp_reporting._decode_kvp_item(data) @@ -26,57 +28,9 @@ class TextKvpReporter(CiTestCase): self.tmp_file_path = self.tmp_path('kvp_pool_file') util.ensure_file(self.tmp_file_path) - def test_event_type_can_be_filtered(self): - reporter = handlers.HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path, - event_types=['foo', 'bar']) - - reporter.publish_event( - events.ReportingEvent('foo', 'name', 'description')) - reporter.publish_event( - events.ReportingEvent('some_other', 'name', 'description3')) - reporter.q.join() - - kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(1, len(kvps)) - - reporter.publish_event( - events.ReportingEvent('bar', 'name', 'description2')) - reporter.q.join() - kvps = list(reporter._iterate_kvps(0)) - self.assertEqual(2, len(kvps)) - - self.assertIn('foo', kvps[0]['key']) - self.assertIn('bar', kvps[1]['key']) - self.assertNotIn('some_other', kvps[0]['key']) - self.assertNotIn('some_other', kvps[1]['key']) - - def test_events_are_over_written(self): - reporter = handlers.HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - - self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) - - reporter.publish_event( - events.ReportingEvent('foo', 'name1', 'description')) - reporter.publish_event( - events.ReportingEvent('foo', 'name2', 'description')) - reporter.q.join() - self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) - - reporter2 = handlers.HyperVKvpReportingHandler( - kvp_file_path=self.tmp_file_path) - reporter2.incarnation_no = reporter.incarnation_no + 1 - reporter2.publish_event( - events.ReportingEvent('foo', 'name3', 'description')) - reporter2.q.join() - - self.assertEqual(2, len(list(reporter2._iterate_kvps(0)))) - def test_events_with_higher_incarnation_not_over_written(self): - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) - self.assertEqual(0, len(list(reporter._iterate_kvps(0)))) reporter.publish_event( @@ -86,7 +40,7 @@ class TextKvpReporter(CiTestCase): reporter.q.join() self.assertEqual(2, len(list(reporter._iterate_kvps(0)))) - reporter3 = handlers.HyperVKvpReportingHandler( + reporter3 = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) reporter3.incarnation_no = reporter.incarnation_no - 1 reporter3.publish_event( 
@@ -95,7 +49,7 @@ class TextKvpReporter(CiTestCase): self.assertEqual(3, len(list(reporter3._iterate_kvps(0)))) def test_finish_event_result_is_logged(self): - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) reporter.publish_event( events.FinishReportingEvent('name2', 'description1', @@ -105,7 +59,7 @@ class TextKvpReporter(CiTestCase): def test_file_operation_issue(self): os.remove(self.tmp_file_path) - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) reporter.publish_event( events.FinishReportingEvent('name2', 'description1', @@ -113,7 +67,7 @@ class TextKvpReporter(CiTestCase): reporter.q.join() def test_event_very_long(self): - reporter = handlers.HyperVKvpReportingHandler( + reporter = HyperVKvpReportingHandler( kvp_file_path=self.tmp_file_path) description = 'ab' * reporter.HV_KVP_EXCHANGE_MAX_VALUE_SIZE long_event = events.FinishReportingEvent( @@ -132,3 +86,43 @@ class TextKvpReporter(CiTestCase): self.assertEqual(msg_slice['msg_i'], i) full_description += msg_slice['msg'] self.assertEqual(description, full_description) + + def test_not_truncate_kvp_file_modified_after_boot(self): + with open(self.tmp_file_path, "wb+") as f: + kvp = {'key': 'key1', 'value': 'value1'} + data = (struct.pack("%ds%ds" % ( + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), + kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) + f.write(data) + cur_time = time.time() + os.utime(self.tmp_file_path, (cur_time, cur_time)) + + # reset this because the unit test framework + # has already polluted the class variable + HyperVKvpReportingHandler._already_truncated_pool_file = False + + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(1, len(kvps)) + + def test_truncate_stale_kvp_file(self): + with open(self.tmp_file_path, "wb+") as f: + kvp = {'key': 'key1', 'value': 'value1'} + data = (struct.pack("%ds%ds" % ( + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_KEY_SIZE, + HyperVKvpReportingHandler.HV_KVP_EXCHANGE_MAX_VALUE_SIZE), + kvp['key'].encode('utf-8'), kvp['value'].encode('utf-8'))) + f.write(data) + + # set the time ways back to make it look like + # we had an old kvp file + os.utime(self.tmp_file_path, (1000000, 1000000)) + + # reset this because the unit test framework + # has already polluted the class variable + HyperVKvpReportingHandler._already_truncated_pool_file = False + + reporter = HyperVKvpReportingHandler(kvp_file_path=self.tmp_file_path) + kvps = list(reporter._iterate_kvps(0)) + self.assertEqual(0, len(kvps)) -- cgit v1.2.3 From ab6621d849b24bb652243e88c79f6f3b446048d7 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Wed, 8 May 2019 14:54:03 +0000 Subject: DataSourceAzure: Adjust timeout for polling IMDS If the IMDS primary server is not available, falling back to the secondary server takes about 1s. The net result is that the expected E2E time is slightly more than 1s. This change increases the timeout to 2s to prevent the infinite loop of timeouts. 
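The arithmetic behind the fix, made explicit (a sketch using the values from the message above, not cloud-init code): with infinite retries, each attempt's timeout must outlive the failover, otherwise no attempt can ever succeed.

    FAILOVER_SECONDS = 1.0                  # approx. primary->secondary failover
    OLD_TIMEOUT, NEW_TIMEOUT = 1.0, 2.0

    assert OLD_TIMEOUT <= FAILOVER_SECONDS  # old: expires during failover, every time
    assert NEW_TIMEOUT > FAILOVER_SECONDS   # new: ~1s of headroom per attempt
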
--- cloudinit/sources/DataSourceAzure.py | 15 ++++++++++----- tests/unittests/test_datasource/test_azure.py | 10 +++++++--- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 64165259..b7440c1d 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -57,7 +57,12 @@ AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' + +# In the event where the IMDS primary server is not +# available, it takes 1s to fallback to the secondary one +IMDS_TIMEOUT_IN_SECONDS = 2 IMDS_URL = "http://169.254.169.254/metadata/" + PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" # List of static scripts and network config artifacts created by @@ -582,9 +587,9 @@ class DataSourceAzure(sources.DataSource): return self._ephemeral_dhcp_ctx.clean_network() else: - return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True, - log_req_resp=False).contents + return readurl(url, timeout=IMDS_TIMEOUT_IN_SECONDS, + headers=headers, exception_cb=exc_cb, + infinite=True, log_req_resp=False).contents except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() @@ -1291,8 +1296,8 @@ def _get_metadata_from_imds(retries): headers = {"Metadata": "true"} try: response = readurl( - url, timeout=1, headers=headers, retries=retries, - exception_cb=retry_on_url_exc) + url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, + retries=retries, exception_cb=retry_on_url_exc) except Exception as e: LOG.debug('Ignoring IMDS instance metadata: %s', e) return {} diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index ab77c034..427ab7e7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -163,7 +163,8 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): m_readurl.assert_called_with( self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, timeout=1) + headers={'Metadata': 'true'}, retries=2, + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) @mock.patch('cloudinit.url_helper.time.sleep') @mock.patch(MOCKPATH + 'net.is_up') @@ -1791,7 +1792,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs() - }, method='GET', timeout=1, + }, method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( @@ -1828,7 +1830,9 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs()}, - method='GET', timeout=1, url=full_url)]) + method='GET', + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, + url=full_url)]) self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', -- cgit v1.2.3 From 9aa97cfc73b31dc548a240e5f4bd1ef41861cc4d Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 8 May 2019 15:14:31 +0000 Subject: replace remaining occurrences of LOG.warn --- cloudinit/sources/DataSourceCloudStack.py | 2 +- cloudinit/sources/DataSourceEc2.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceCloudStack.py 
b/cloudinit/sources/DataSourceCloudStack.py index d4b758f2..f185dc71 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -95,7 +95,7 @@ class DataSourceCloudStack(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, status_cb=LOG.warn) + timeout=url_params.timeout_seconds, status_cb=LOG.warning) if url: LOG.debug("Using metadata source: '%s'", url) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index ac28f1db..5c017bfb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -208,7 +208,7 @@ class DataSourceEc2(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url( urls=urls, max_wait=url_params.max_wait_seconds, - timeout=url_params.timeout_seconds, status_cb=LOG.warn) + timeout=url_params.timeout_seconds, status_cb=LOG.warning) if url: self.metadata_address = url2base[url] -- cgit v1.2.3 From acc25d8d7d603313059ac35b4253b504efc560a9 Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Wed, 8 May 2019 22:47:07 +0000 Subject: cc_mounts: check if mount -a on no-change fstab path Under some circumstances, cc_disk_setup may reformat volumes which already appear in /etc/fstab (e.g. Azure ephemeral drive is reformatted from NTFS to ext4 after service-heal). Normally, cc_mounts only calls mount -a if it altered /etc/fstab. With this change cc_mounts will read /proc/mounts and verify if configured mounts are already mounted and if not raise flag to request a mount -a. This handles the case where no changes to fstab occur but a mount -a is required due to change in underlying device which prevented the .mount unit from running until after disk was reformatted. LP: #1825596 --- cloudinit/config/cc_mounts.py | 11 ++++++++ .../unittests/test_handler/test_handler_mounts.py | 30 +++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 339baba9..123ffb84 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -439,6 +439,7 @@ def handle(_name, cfg, cloud, log, _args): cc_lines = [] needswap = False + need_mount_all = False dirs = [] for line in actlist: # write 'comment' in the fs_mntops, entry, claiming this @@ -449,11 +450,18 @@ def handle(_name, cfg, cloud, log, _args): dirs.append(line[1]) cc_lines.append('\t'.join(line)) + mount_points = [v['mountpoint'] for k, v in util.mounts().items() + if 'mountpoint' in v] for d in dirs: try: util.ensure_dir(d) except Exception: util.logexc(log, "Failed to make '%s' config-mount", d) + # dirs is list of directories on which a volume should be mounted. + # If any of them does not already show up in the list of current + # mount points, we will definitely need to do mount -a. 
+ if not need_mount_all and d not in mount_points: + need_mount_all = True sadds = [WS.sub(" ", n) for n in cc_lines] sdrops = [WS.sub(" ", n) for n in fstab_removed] @@ -473,6 +481,9 @@ def handle(_name, cfg, cloud, log, _args): log.debug("No changes to /etc/fstab made.") else: log.debug("Changes to fstab: %s", sops) + need_mount_all = True + + if need_mount_all: activate_cmds.append(["mount", "-a"]) if uses_systemd: activate_cmds.append(["systemctl", "daemon-reload"]) diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py index 8fea6c2a..0fb160be 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -154,7 +154,15 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): return_value=True) self.add_patch('cloudinit.config.cc_mounts.util.subp', - 'mock_util_subp') + 'm_util_subp') + + self.add_patch('cloudinit.config.cc_mounts.util.mounts', + 'mock_util_mounts', + return_value={ + '/dev/sda1': {'fstype': 'ext4', + 'mountpoint': '/', + 'opts': 'rw,relatime,discard' + }}) self.mock_cloud = mock.Mock() self.mock_log = mock.Mock() @@ -230,4 +238,24 @@ class TestFstabHandling(test_helpers.FilesystemMockingTestCase): fstab_new_content = fd.read() self.assertEqual(fstab_expected_content, fstab_new_content) + def test_no_change_fstab_sets_needs_mount_all(self): + '''verify unchanged fstab entries are mounted if not call mount -a''' + fstab_original_content = ( + 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n' + 'LABEL=UEFI /boot/efi vfat defaults 0 0\n' + '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n' + ) + fstab_expected_content = fstab_original_content + cc = {'mounts': [ + ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec']]} + with open(cc_mounts.FSTAB_PATH, 'w') as fd: + fd.write(fstab_original_content) + with open(cc_mounts.FSTAB_PATH, 'r') as fd: + fstab_new_content = fd.read() + self.assertEqual(fstab_expected_content, fstab_new_content) + cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) + self.m_util_subp.assert_has_calls([ + mock.call(['mount', '-a']), + mock.call(['systemctl', 'daemon-reload'])]) + # vi: ts=4 expandtab -- cgit v1.2.3 From ce5fe3a20e86c4745d0310bb9c344d1344d9684c Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Thu, 9 May 2019 18:05:25 +0000 Subject: tests: add Eoan release --- tests/cloud_tests/releases.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index ec5da724..924ad956 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -129,6 +129,22 @@ features: releases: # UBUNTU ================================================================= + eoan: + # EOL: Jul 2020 + default: + enabled: true + release: eoan + version: 19.10 + os: ubuntu + feature_groups: + - base + - debian_base + - ubuntu_specific + lxd: + sstreams_server: https://cloud-images.ubuntu.com/daily + alias: eoan + setup_overrides: null + override_templates: false disco: # EOL: Jan 2020 default: -- cgit v1.2.3 From 7193b80e4ade638880bd66b1f208c049ffa24479 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Thu, 9 May 2019 18:35:17 +0000 Subject: freebsd: add chpasswd pkg in the image cc_set_passwords.py depends on chpasswd binary. 
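For context on why the package is required: the set-passwords path feeds "name:password" pairs to the chpasswd binary on stdin, so the tool must be present in the image. An illustrative invocation (plain subprocess, not cloud-init's own wrapper):

    import subprocess

    def set_passwords(pairs):
        # chpasswd reads one "name:password" entry per line on stdin.
        payload = "".join("%s:%s\n" % (user, pw) for user, pw in pairs)
        subprocess.run(["chpasswd"], input=payload.encode(), check=True)
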
--- tools/build-on-freebsd | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd index d23fde2b..dc3b9747 100755 --- a/tools/build-on-freebsd +++ b/tools/build-on-freebsd @@ -9,6 +9,7 @@ fail() { echo "FAILED:" "$@" 1>&2; exit 1; } depschecked=/tmp/c-i.dependencieschecked pkgs=" bash + chpasswd dmidecode e2fsprogs py27-Jinja2 @@ -17,6 +18,7 @@ pkgs=" py27-configobj py27-jsonpatch py27-jsonpointer + py27-jsonschema py27-oauthlib py27-requests py27-serial @@ -28,12 +30,9 @@ pkgs=" [ -f "$depschecked" ] || pkg install ${pkgs} || fail "install packages" touch $depschecked -# Required but unavailable port/pkg: py27-jsonpatch py27-jsonpointer -# Luckily, the install step will take care of this by installing it from pypi... - # Build the code and install in /usr/local/: -python setup.py build -python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd +python2.7 setup.py build +python2.7 setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd # Enable cloud-init in /etc/rc.conf: sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf -- cgit v1.2.3 From acd84e22b3ebe639d05df3357cde98d9b1e5de91 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 9 May 2019 16:18:32 -0600 Subject: release 19.1 Bump the version on cloudinit/version.py to be 19.1 and update ChangeLog LP: #1828479 --- ChangeLog | 117 +++++++++++++++++++++++++++++++++++++++++++++++++++ cloudinit/version.py | 2 +- 2 files changed, 118 insertions(+), 1 deletion(-) diff --git a/ChangeLog b/ChangeLog index 8fa6fdd4..bf48fd48 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,120 @@ +19.1: + - freebsd: add chpasswd pkg in the image [Gonéri Le Bouder] + - tests: add Eoan release [Paride Legovini] + - cc_mounts: check if mount -a on no-change fstab path + [Jason Zions (MSFT)] (LP: #1825596) + - replace remaining occurrences of LOG.warn [Daniel Watkins] + - DataSourceAzure: Adjust timeout for polling IMDS [Anh Vo] + - Azure: Changes to the Hyper-V KVP Reporter [Anh Vo] + - git tests: no longer show warning about safe yaml. + - tools/read-version: handle errors [Chad Miller] + - net/sysconfig: only indicate available on known sysconfig distros + (LP: #1819994) + - packages: update rpm specs for new bash completion path + [Daniel Watkins] (LP: #1825444) + - test_azure: mock util.SeLinuxGuard where needed + [Jason Zions (MSFT)] (LP: #1825253) + - setup.py: install bash completion script in new location [Daniel Watkins] + - mount_cb: do not pass sync and rw options to mount + [Gonéri Le Bouder] (LP: #1645824) + - cc_apt_configure: fix typo in apt documentation [Dominic Schlegel] + - Revert "DataSource: move update_events from a class to an instance..." + [Daniel Watkins] + - Change DataSourceNoCloud to ignore file system label's case. + [Risto Oikarinen] + - cmd:main.py: Fix missing 'modules-init' key in modes dict + [Antonio Romito] (LP: #1815109) + - ubuntu_advantage: rewrite cloud-config module + - Azure: Treat _unset network configuration as if it were absent + [Jason Zions (MSFT)] (LP: #1823084) + - DatasourceAzure: add additional logging for azure datasource [Anh Vo] + - cloud_tests: fix apt_pipelining test-cases + - Azure: Ensure platform random_seed is always serializable as JSON. 
+ [Jason Zions (MSFT)] + - net/sysconfig: write out SUSE-compatible IPv6 config [Robert Schweikert] + - tox: Update testenv for openSUSE Leap to 15.0 [Thomas Bechtold] + - net: Fix ipv6 static routes when using eni renderer + [Raphael Glon] (LP: #1818669) + - Add ubuntu_drivers config module [Daniel Watkins] + - doc: Refresh Azure walinuxagent docs [Daniel Watkins] + - tox: bump pylint version to latest (2.3.1) [Daniel Watkins] + - DataSource: move update_events from a class to an instance attribute + [Daniel Watkins] (LP: #1819913) + - net/sysconfig: Handle default route setup for dhcp configured NICs + [Robert Schweikert] (LP: #1812117) + - DataSourceEc2: update RELEASE_BLOCKER to be more accurate + [Daniel Watkins] + - cloud-init-per: POSIX sh does not support string subst, use sed + (LP: #1819222) + - Support locking user with usermod if passwd is not available. + - Example for Microsoft Azure data disk added. [Anton Olifir] + - clean: correctly determine the path for excluding seed directory + [Daniel Watkins] (LP: #1818571) + - helpers/openstack: Treat unknown link types as physical + [Daniel Watkins] (LP: #1639263) + - drop Python 2.6 support and our NIH version detection [Daniel Watkins] + - tip-pylint: Fix assignment-from-return-none errors + - net: append type:dhcp[46] only if dhcp[46] is True in v2 netconfig + [Kurt Stieger] (LP: #1818032) + - cc_apt_pipelining: stop disabling pipelining by default + [Daniel Watkins] (LP: #1794982) + - tests: fix some slow tests and some leaking state [Daniel Watkins] + - util: don't determine string_types ourselves [Daniel Watkins] + - cc_rsyslog: Escape possible nested set [Daniel Watkins] (LP: #1816967) + - Enable encrypted_data_bag_secret support for Chef + [Eric Williams] (LP: #1817082) + - azure: Filter list of ssh keys pulled from fabric [Jason Zions (MSFT)] + - doc: update merging doc with fixes and some additional details/examples + - tests: integration test failure summary to use traceback if empty error + - This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676 + [Vitaly Kuznetsov] + - EC2: Rewrite network config on AWS Classic instances every boot + [Guilherme G. Piccoli] (LP: #1802073) + - netinfo: Adjust ifconfig output parsing for FreeBSD ipv6 entries + (LP: #1779672) + - netplan: Don't render yaml aliases when dumping netplan (LP: #1815051) + - add PyCharm IDE .idea/ path to .gitignore [Dominic Schlegel] + - correct grammar issue in instance metadata documentation + [Dominic Schlegel] (LP: #1802188) + - clean: cloud-init clean should not trace when run from within cloud_dir + (LP: #1795508) + - Resolve flake8 comparison and pycodestyle over-ident issues + [Paride Legovini] + - opennebula: also exclude epochseconds from changed environment vars + (LP: #1813641) + - systemd: Render generator from template to account for system + differences. [Robert Schweikert] + - sysconfig: On SUSE, use STARTMODE instead of ONBOOT + [Robert Schweikert] (LP: #1799540) + - flake8: use ==/!= to compare str, bytes, and int literals + [Paride Legovini] + - opennebula: exclude EPOCHREALTIME as known bash env variable with a + delta (LP: #1813383) + - tox: fix disco httpretty dependencies for py37 (LP: #1813361) + - run-container: uncomment baseurl in yum.repos.d/*.repo when using a + proxy [Paride Legovini] + - lxd: install zfs-linux instead of zfs meta package + [Johnson Shi] (LP: #1799779) + - net/sysconfig: do not write a resolv.conf file with only the header. 
+ [Robert Schweikert] + - net: Make sysconfig renderer compatible with Network Manager. + [Eduardo Otubo] + - cc_set_passwords: Fix regex when parsing hashed passwords + [Marlin Cremers] (LP: #1811446) + - net: Wait for dhclient to daemonize before reading lease file + [Jason Zions] (LP: #1794399) + - [Azure] Increase retries when talking to Wireserver during metadata walk + [Jason Zions] + - Add documentation on adding a datasource. + - doc: clean up some datasource documentation. + - ds-identify: fix wrong variable name in ovf_vmware_transport_guestinfo. + - Scaleway: Support ssh keys provided inside an instance tag. [PORTE Loïc] + - OVF: simplify expected return values of transport functions. + - Vmware: Add support for the com.vmware.guestInfo OVF transport. + (LP: #1807466) + - HACKING.rst: change contact info to Josh Powers + - Update to pylint 2.2.2. + 18.5: - tests: add Disco release [Joshua Powers] - net: render 'metric' values in per-subnet routes (LP: #1805871) diff --git a/cloudinit/version.py b/cloudinit/version.py index a2c5d43a..ddcd4368 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "18.5" +__VERSION__ = "19.1" _PACKAGED_VERSION = '@@PACKAGED_VERSION@@' FEATURES = [ -- cgit v1.2.3 From baa478546d8cac98a706010699d64f8c2f70b5bf Mon Sep 17 00:00:00 2001 From: "Jason Zions (MSFT)" Date: Fri, 10 May 2019 18:38:55 +0000 Subject: Azure: Return static fallback address as if failed to find endpoint The Azure data source helper attempts to use information in the dhcp lease to find the Wireserver endpoint (IP address). Under some unusual circumstances, those attempts will fail. This change uses a static address, known to be always correct in the Azure public and sovereign clouds, when the helper fails to locate a valid dhcp lease. This address is not guaranteed to be correct in Azure Stack environments; it's still best to use the information from the lease whenever possible. 
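As a worked example, the static fallback is the colon-separated form found in dhclient leases: four hex octets that decode to the well-known wireserver address (a sketch; the real parser also handles other lease-value encodings):

    def lease_value_to_ip(value):
        return ".".join(str(int(octet, 16)) for octet in value.split(":"))

    assert lease_value_to_ip("a8:3f:81:10") == "168.63.129.16"
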
--- cloudinit/sources/helpers/azure.py | 14 +++++++++++--- tests/unittests/test_datasource/test_azure_helper.py | 9 +++++++-- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index d3af05ee..82c4c8c4 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -20,6 +20,9 @@ from cloudinit.reporting import events LOG = logging.getLogger(__name__) +# This endpoint matches the format as found in dhcp lease files, since this +# value is applied if the endpoint can't be found within a lease file +DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10" azure_ds_reporter = events.ReportEventStack( name="azure-ds", @@ -297,7 +300,12 @@ class WALinuxAgentShim(object): @azure_ds_telemetry_reporter def _get_value_from_leases_file(fallback_lease_file): leases = [] - content = util.load_file(fallback_lease_file) + try: + content = util.load_file(fallback_lease_file) + except IOError as ex: + LOG.error("Failed to read %s: %s", fallback_lease_file, ex) + return None + LOG.debug("content is %s", content) option_name = _get_dhcp_endpoint_option_name() for line in content.splitlines(): @@ -372,9 +380,9 @@ class WALinuxAgentShim(object): fallback_lease_file) value = WALinuxAgentShim._get_value_from_leases_file( fallback_lease_file) - if value is None: - raise ValueError('No endpoint found.') + LOG.warning("No lease found; using default endpoint") + value = DEFAULT_WIRESERVER_ENDPOINT endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 02556165..bd006aba 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -67,12 +67,17 @@ class TestFindEndpoint(CiTestCase): self.networkd_leases.return_value = None def test_missing_file(self): - self.assertRaises(ValueError, wa_shim.find_endpoint) + """wa_shim find_endpoint uses default endpoint if leasefile not found + """ + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") def test_missing_special_azure_line(self): + """wa_shim find_endpoint uses default endpoint if leasefile is found + but does not contain DHCP Option 245 (whose value is the endpoint) + """ self.load_file.return_value = '' self.dhcp_options.return_value = {'eth0': {'key': 'value'}} - self.assertRaises(ValueError, wa_shim.find_endpoint) + self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") @staticmethod def _build_lease_content(encoded_address): -- cgit v1.2.3 From 0f8695323262e41c699588c7cd140f6b58c62017 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Fri, 24 May 2019 21:39:19 +0000 Subject: freebsd: NoCloud data source support blkid is a Linux-only command. With this patch, cloud-init uses another approach to find the data source on FreeBSD. 
LP: #1645824 --- cloudinit/sources/DataSourceNoCloud.py | 40 ++++++++++++++----------- config/cloud.cfg.tmpl | 4 +-- tests/unittests/test_datasource/test_nocloud.py | 18 +++++++++++ 3 files changed, 43 insertions(+), 19 deletions(-) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index fcf5d589..8a9e5dd2 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -35,6 +35,26 @@ class DataSourceNoCloud(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode) + def _get_devices(self, label): + if util.is_FreeBSD(): + devlist = [ + p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label] + if os.path.exists(p)] + else: + # Query optical drive to get it in blkid cache for 2.6 kernels + util.find_devs_with(path="/dev/sr0") + util.find_devs_with(path="/dev/sr1") + + fslist = util.find_devs_with("TYPE=vfat") + fslist.extend(util.find_devs_with("TYPE=iso9660")) + + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + + devlist = list(set(fslist) & set(label_list)) + devlist.sort(reverse=True) + return devlist + def _get_data(self): defaults = { "instance-id": "nocloud", @@ -99,20 +119,7 @@ class DataSourceNoCloud(sources.DataSource): label = self.ds_cfg.get('fs_label', "cidata") if label is not None: - # Query optical drive to get it in blkid cache for 2.6 kernels - util.find_devs_with(path="/dev/sr0") - util.find_devs_with(path="/dev/sr1") - - fslist = util.find_devs_with("TYPE=vfat") - fslist.extend(util.find_devs_with("TYPE=iso9660")) - - label_list = util.find_devs_with("LABEL=%s" % label.upper()) - label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) - - devlist = list(set(fslist) & set(label_list)) - devlist.sort(reverse=True) - - for dev in devlist: + for dev in self._get_devices(label): try: LOG.debug("Attempting to use data from %s", dev) @@ -120,9 +127,8 @@ class DataSourceNoCloud(sources.DataSource): seeded = util.mount_cb(dev, _pp2d_callback, pp2d_kwargs) except ValueError: - if dev in label_list: - LOG.warning("device %s with label=%s not a" - "valid seed.", dev, label) + LOG.warning("device %s with label=%s not a" + "valid seed.", dev, label) continue mydata = _merge_new_seed(mydata, seeded) diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 25db43e0..684c7473 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -32,8 +32,8 @@ preserve_hostname: false {% if variant in ["freebsd"] %} # This should not be required, but leave it in place until the real cause of -# not beeing able to find -any- datasources is resolved. -datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] +# not finding -any- datasources is resolved. 
+datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] {% endif %} # Example datasource config # datasource: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index b785362f..18bea0b9 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -278,6 +278,24 @@ class TestNoCloudDataSource(CiTestCase): self.assertEqual(netconf, dsrc.network_config) self.assertNotIn(gateway, str(dsrc.network_config)) + @mock.patch("cloudinit.util.blkid") + def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + self.mocks.enter_context( + mock.patch.object(util, 'is_FreeBSD', return_value=True)) + + self.mocks.enter_context( + mock.patch.object(os.path, 'exists', return_value=True)) + + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc._get_devices('foo') + self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) + fake_blkid.assert_not_called() + class TestParseCommandLineData(CiTestCase): -- cgit v1.2.3 From 6197c347c3960254dbcdb28eb73989d062ad9689 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Tue, 28 May 2019 15:39:48 +0000 Subject: freebsd: ability to grow root file system - UFS file system support - GPT partition table support - add support for newfs's -L parameter (label) - move freebsd specific test from Azure to freebsd --- cloudinit/config/cc_growpart.py | 3 +- cloudinit/config/cc_resizefs.py | 6 +-- cloudinit/util.py | 22 ++++++----- tests/unittests/test_datasource/test_azure.py | 24 ------------ tests/unittests/test_distros/test_freebsd.py | 45 ++++++++++++++++++++++ .../test_handler/test_handler_resizefs.py | 2 +- 6 files changed, 64 insertions(+), 38 deletions(-) create mode 100644 tests/unittests/test_distros/test_freebsd.py diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index bafca9d8..564f376f 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -215,7 +215,8 @@ def device_part_info(devpath): # FreeBSD doesn't know of sysfs so just get everything we need from # the device, like /dev/vtbd0p2. 
if util.is_FreeBSD(): - m = re.search('^(/dev/.+)p([0-9])$', devpath) + freebsd_part = "/dev/" + util.find_freebsd_part(devpath) + m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) return (m.group(1), m.group(2)) if not os.path.exists(syspath): diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 076b9d5a..afd2e060 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth): def _resize_ufs(mount_point, devpth): - return ('growfs', '-y', devpth) + return ('growfs', '-y', mount_point) def _resize_zfs(mount_point, devpth): @@ -101,7 +101,7 @@ def _can_skip_resize_ufs(mount_point, devpth): """ # dumpfs -m / # newfs command for / (/dev/label/rootfs) - newfs -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 + newfs -L rootf -O 2 -U -a 4 -b 32768 -d 32768 -e 4096 -f 4096 -g 16384 -h 64 -i 8192 -j -k 6408 -m 8 -o time -s 58719232 /dev/label/rootf """ cur_fs_sz = None @@ -110,7 +110,7 @@ def _can_skip_resize_ufs(mount_point, devpth): for line in dumpfs_res.splitlines(): if not line.startswith('#'): newfs_cmd = shlex.split(line) - opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' + opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:L:' optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value) for o, a in optlist: if o == "-s": diff --git a/cloudinit/util.py b/cloudinit/util.py index ea4199cd..aa23b3f3 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2337,17 +2337,21 @@ def parse_mtab(path): return None -def find_freebsd_part(label_part): - if label_part.startswith("/dev/label/"): - target_label = label_part[5:] - (label_part, _err) = subp(['glabel', 'status', '-s']) - for labels in label_part.split("\n"): +def find_freebsd_part(fs): + splitted = fs.split('/') + if len(splitted) == 3: + return splitted[2] + elif splitted[2] in ['label', 'gpt', 'ufs']: + target_label = fs[5:] + (part, _err) = subp(['glabel', 'status', '-s']) + for labels in part.split("\n"): items = labels.split() - if len(items) > 0 and items[0].startswith(target_label): - label_part = items[2] + if len(items) > 0 and items[0] == target_label: + part = items[2] break - label_part = str(label_part) - return label_part + return str(part) + else: + LOG.warning("Unexpected input in find_freebsd_part: %s", fs) def get_path_dev_freebsd(path, mnt_list): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 427ab7e7..afb614e4 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -6,7 +6,6 @@ from cloudinit import url_helper from cloudinit.sources import ( UNSET, DataSourceAzure as dsaz, InvalidMetaDataException) from cloudinit.util import (b64e, decode_binary, load_file, write_file, - find_freebsd_part, get_path_dev_freebsd, MountFailedError, json_dumps, load_json) from cloudinit.version import version_string as vs from cloudinit.tests.helpers import ( @@ -391,29 +390,6 @@ scbus-1 on xpt0 bus 0 dev = ds.get_resource_disk_on_freebsd(1) self.assertEqual("da1", dev) - @mock.patch('cloudinit.util.subp') - def test_find_freebsd_part_on_Azure(self, mock_subp): - glabel_out = ''' -gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 - label/rootfs N/A da0p2 - label/swap N/A da0p3 -''' - mock_subp.return_value = (glabel_out, "") - res = find_freebsd_part("/dev/label/rootfs") - self.assertEqual("da0p2", res) - - def test_get_path_dev_freebsd_on_Azure(self): - mnt_list = ''' -/dev/label/rootfs / ufs rw 1 1 -devfs /dev devfs 
rw,multilabel 0 0 -fdescfs /dev/fd fdescfs rw 0 0 -/dev/da1s1 /mnt/resource ufs rw 2 2 -''' - with mock.patch.object(os.path, 'exists', - return_value=True): - res = get_path_dev_freebsd('/etc', mnt_list) - self.assertIsNotNone(res) - @mock.patch(MOCKPATH + '_is_platform_viable') def test_call_is_platform_viable_seed(self, m_is_platform_viable): """Check seed_dir using _is_platform_viable and return False.""" diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/test_distros/test_freebsd.py new file mode 100644 index 00000000..8af253a2 --- /dev/null +++ b/tests/unittests/test_distros/test_freebsd.py @@ -0,0 +1,45 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd) +from cloudinit.tests.helpers import (CiTestCase, mock) + +import os + + +class TestDeviceLookUp(CiTestCase): + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_label(self, mock_subp): + glabel_out = ''' +gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 + label/rootfs N/A da0p2 + label/swap N/A da0p3 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/label/rootfs") + self.assertEqual("da0p2", res) + + @mock.patch('cloudinit.util.subp') + def test_find_freebsd_part_gpt(self, mock_subp): + glabel_out = ''' + gpt/bootfs N/A vtbd0p1 +gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166 N/A vtbd0p1 + gpt/swapfs N/A vtbd0p2 + gpt/rootfs N/A vtbd0p3 + iso9660/cidata N/A vtbd2 +''' + mock_subp.return_value = (glabel_out, "") + res = find_freebsd_part("/dev/gpt/rootfs") + self.assertEqual("vtbd0p3", res) + + def test_get_path_dev_freebsd_label(self): + mnt_list = ''' +/dev/label/rootfs / ufs rw 1 1 +devfs /dev devfs rw,multilabel 0 0 +fdescfs /dev/fd fdescfs rw 0 0 +/dev/da1s1 /mnt/resource ufs rw 2 2 +''' + with mock.patch.object(os.path, 'exists', + return_value=True): + res = get_path_dev_freebsd('/etc', mnt_list) + self.assertIsNotNone(res) diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py index 35187847..db9a0414 100644 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ b/tests/unittests/test_handler/test_handler_resizefs.py @@ -147,7 +147,7 @@ class TestResizefs(CiTestCase): def test_resize_ufs_cmd_return(self): mount_point = '/' devpth = '/dev/sda2' - self.assertEqual(('growfs', '-y', devpth), + self.assertEqual(('growfs', '-y', mount_point), _resize_ufs(mount_point, devpth)) @mock.patch('cloudinit.util.is_container', return_value=False) -- cgit v1.2.3 From c951963ffa94145be1bd5f1cef7dba6007f75bd2 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Tue, 28 May 2019 16:23:30 +0000 Subject: freebsd: fix the name of cloudcfg VARIANT config/cloud.cfg.tmpl uses 'freebsd', not 'bsd' to identify FreeBSD. 
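The effect of the wrong name is easy to demonstrate: a variant that never matches the template guard silently drops every FreeBSD-only block. A minimal reproduction with plain Jinja2, mirroring the guard seen in cloud.cfg.tmpl above (illustrative, not the render-cloudcfg code path):

    from jinja2 import Template

    tmpl = Template('{% if variant in ["freebsd"] %}freebsd-only{% endif %}')
    assert tmpl.render(variant='bsd') == ''                  # guard never fires
    assert tmpl.render(variant='freebsd') == 'freebsd-only'  # fixed name matches
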
--- tools/render-cloudcfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 8b7cb875..0957c324 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -4,7 +4,7 @@ import argparse import os import sys -VARIANTS = ["bsd", "centos", "fedora", "rhel", "suse", "ubuntu", "unknown"] +VARIANTS = ["freebsd", "centos", "fedora", "rhel", "suse", "ubuntu", "unknown"] if "avoid-pep8-E402-import-not-top-of-file": _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -- cgit v1.2.3 From ded1ec81e3c6c37c5241b12fcc3c41182e675dff Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 29 May 2019 04:59:43 +0000 Subject: netplan: update netplan key mappings for gratuitous-arp Previous versions of netplan included a misspelling for the bond parameter around gratuitous-arp. This has been fixed and released and cloud-init needs to accept both values. This branch fixes the key that will be rendered and transforms the previous misspelling when capturing network_state. LP: #1827238 --- cloudinit/net/network_state.py | 8 ++++++++ tests/unittests/test_net.py | 46 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 4d19f562..3702130a 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -707,6 +707,14 @@ class NetworkStateInterpreter(object): item_params = dict((key, value) for (key, value) in item_cfg.items() if key not in NETWORK_V2_KEY_FILTER) + # we accept the fixed spelling, but write the old for compatability + # Xenial does not have an updated netplan which supports the + # correct spelling. LP: #1756701 + params = item_params['parameters'] + grat_value = params.pop('gratuitous-arp', None) + if grat_value: + params['gratuitious-arp'] = grat_value + v1_cmd = { 'type': cmd_type, 'name': item_name, diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index e85e9640..b936bc9c 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -407,6 +407,37 @@ network: - maas """ +NETPLAN_BOND_GRAT_ARP = """ +network: + bonds: + bond0: + interfaces: + - ens3 + macaddress: 68:05:ca:64:d3:6c + mtu: 9000 + parameters: + gratuitious-arp: 1 + bond1: + interfaces: + - ens4 + macaddress: 68:05:ca:64:d3:6d + mtu: 9000 + parameters: + gratuitous-arp: 2 + ethernets: + ens3: + dhcp4: false + dhcp6: false + match: + macaddress: 52:54:00:ab:cd:ef + ens4: + dhcp4: false + dhcp6: false + match: + macaddress: 52:54:00:11:22:ff + version: 2 +""" + NETPLAN_DHCP_FALSE = """ version: 2 ethernets: @@ -3589,6 +3620,21 @@ class TestNetplanRoundTrip(CiTestCase): entry['expected_netplan'].splitlines(), files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + def test_render_output_supports_both_grat_arp_spelling(self): + entry = { + 'yaml': NETPLAN_BOND_GRAT_ARP, + 'expected_netplan': NETPLAN_BOND_GRAT_ARP.replace('gratuitous', + 'gratuitious'), + } + network_config = yaml.load(entry['yaml']).get('network') + files = self._render_and_read(network_config=network_config) + print(entry['expected_netplan']) + print('-- expected ^ | v rendered --') + print(files['/etc/netplan/50-cloud-init.yaml']) + self.assertEqual( + entry['expected_netplan'].splitlines(), + files['/etc/netplan/50-cloud-init.yaml'].splitlines()) + class TestEniRoundTrip(CiTestCase): -- cgit v1.2.3 From 19ddb1fb11434f860daee2238cdc23a56b9dc86f Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Thu, 30 May 2019 16:39:17 
+0000 Subject: run-container: centos: comment out the repo mirrorlist In this way only the 'baseurl' mirror is used, which is easier to allow through firewalls and proxies. --- tools/run-container | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/run-container b/tools/run-container index 852f4d1e..1d24e15b 100755 --- a/tools/run-container +++ b/tools/run-container @@ -373,7 +373,7 @@ wait_for_boot() { inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf" inside "$name" sed -i s/enabled=1/enabled=0/ \ /etc/yum/pluginconf.d/fastestmirror.conf - inside "$name" sh -c "sed -i '/^#baseurl=/s/#//' /etc/yum.repos.d/*.repo" + inside "$name" sh -c "sed -i '/^#baseurl=/s/#// ; s/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo" else debug 1 "do not know how to configure proxy on $OS_NAME" fi -- cgit v1.2.3 From c3cd42cc655035209329b78b09b3cfb8fc01cf7d Mon Sep 17 00:00:00 2001 From: Brian Murray Date: Fri, 31 May 2019 19:40:07 +0000 Subject: Fix spelling error making 'an Ubuntu' consistent. --- cloudinit/config/cc_ubuntu_advantage.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index f4881233..f846e9a5 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -36,7 +36,7 @@ schema = { """), 'distros': distros, 'examples': [dedent("""\ - # Attach the machine to a Ubuntu Advantage support contract with a + # Attach the machine to an Ubuntu Advantage support contract with a # UA contract token obtained from %s. ubuntu_advantage: token: -- cgit v1.2.3 From deaeb714a3582ff7f31e411bcdaf9669903e35f0 Mon Sep 17 00:00:00 2001 From: "Mark T. Voelker" Date: Mon, 3 Jun 2019 15:37:42 +0000 Subject: Allow identification of OpenStack by Asset Tag When OpenStack is deployed on some hypervisors (such as VMware vSphere), cloud-init doesn't detect that it needs to probe the metadata service because the DMI product name field can't be set to a field that is recognized by cloud-init. However, the asset tag field can be set via flavor extra specs or image metadata. A similar approach is already used to identify Open Telekom Cloud. This patch allows cloud init to recognize "OpenStack Nova" or "OpenStack Compute" in the asset tag field as an indication that the instance being configured is running on an OpenStack platform. LP: #1669875 --- tests/unittests/test_ds_identify.py | 20 ++++++++++++++++++++ tools/ds-identify | 8 ++++++++ 2 files changed, 28 insertions(+) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 8c18aa1a..7575223f 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -435,6 +435,14 @@ class TestDsIdentify(DsIdentifyBase): """Open Telecom identification.""" self._test_ds_found('OpenStack-OpenTelekom') + def test_openstack_asset_tag_nova(self): + """OpenStack identification via asset tag OpenStack Nova.""" + self._test_ds_found('OpenStack-AssetTag-Nova') + + def test_openstack_asset_tag_copute(self): + """OpenStack identification via asset tag OpenStack Compute.""" + self._test_ds_found('OpenStack-AssetTag-Compute') + def test_openstack_on_non_intel_is_maybe(self): """On non-Intel, openstack without dmi info is maybe. 
@@ -759,6 +767,18 @@ VALID_CFG = { 'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'}, 'mocks': [MOCK_VIRT_IS_XEN], }, + 'OpenStack-AssetTag-Nova': { + # VMware vSphere can't modify product-name, LP: #1669875 + 'ds': 'OpenStack', + 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Nova\n'}, + 'mocks': [MOCK_VIRT_IS_XEN], + }, + 'OpenStack-AssetTag-Compute': { + # VMware vSphere can't modify product-name, LP: #1669875 + 'ds': 'OpenStack', + 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Compute\n'}, + 'mocks': [MOCK_VIRT_IS_XEN], + }, 'OVF-seed': { 'ds': 'OVF', 'files': { diff --git a/tools/ds-identify b/tools/ds-identify index 6518901e..e16708f6 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -979,6 +979,14 @@ dscheck_OpenStack() { return ${DS_FOUND} fi + # LP: #1669875 : allow identification of OpenStack by asset tag + if dmi_chassis_asset_tag_matches "$nova"; then + return ${DS_FOUND} + fi + if dmi_chassis_asset_tag_matches "$compute"; then + return ${DS_FOUND} + fi + # LP: #1715241 : arch other than intel are not identified properly. case "$DI_UNAME_MACHINE" in i?86|x86_64) :;; -- cgit v1.2.3 From a0f863da274fcd631441ba38fa9c7dd438a56480 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Mon, 17 Jun 2019 17:43:46 +0000 Subject: tools/build-on-freebsd: update to python3 - use python3 by default - ability to use any Python version through the PYTHON env-var - indent with 4 spaces - use 'set -eux' - remove trailing whitespace - drop the cheetah dep, Jinja2 is enough --- tools/build-on-freebsd | 73 +++++++++++++++++++++++++++----------------------- 1 file changed, 40 insertions(+), 33 deletions(-) diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd index dc3b9747..8ae64567 100755 --- a/tools/build-on-freebsd +++ b/tools/build-on-freebsd @@ -3,36 +3,43 @@ # installing cloud-init. This script takes care of building and installing. It # will optionally make a first run at the end. +set -eux + fail() { echo "FAILED:" "$@" 1>&2; exit 1; } +PYTHON="${PYTHON:-python3}" +if [ ! $(which ${PYTHON}) ]; then + echo "Please install python first." + exit 1 +fi +py_prefix=$(${PYTHON} -c 'import sys; print("py%d%d" % (sys.version_info.major, sys.version_info.minor))') + # Check dependencies: depschecked=/tmp/c-i.dependencieschecked pkgs=" - bash - chpasswd - dmidecode - e2fsprogs - py27-Jinja2 - py27-boto - py27-cheetah - py27-configobj - py27-jsonpatch - py27-jsonpointer - py27-jsonschema - py27-oauthlib - py27-requests - py27-serial - py27-six - py27-yaml - python - sudo + bash + chpasswd + dmidecode + e2fsprogs + $py_prefix-Jinja2 + $py_prefix-boto + $py_prefix-configobj + $py_prefix-jsonpatch + $py_prefix-jsonpointer + $py_prefix-jsonschema + $py_prefix-oauthlib + $py_prefix-requests + $py_prefix-serial + $py_prefix-six + $py_prefix-yaml + sudo " -[ -f "$depschecked" ] || pkg install ${pkgs} || fail "install packages" +[ -f "$depschecked" ] || pkg install --yes ${pkgs} || fail "install packages" touch $depschecked # Build the code and install in /usr/local/: -python2.7 setup.py build -python2.7 setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd +${PYTHON} setup.py build +${PYTHON} setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd # Enable cloud-init in /etc/rc.conf: sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf @@ -40,21 +47,21 @@ echo 'cloudinit_enable="YES"' >> /etc/rc.conf echo "Installation completed." -if [ "$1" = "run" ]; then - echo "Ok, now let's see if it works." 
+if [ "$#" -gt 1 ] && [ "$1" = "run" ]; then + echo "Ok, now let's see if it works." - # Backup SSH keys - mv /etc/ssh/ssh_host_* /tmp/ + # Backup SSH keys + mv /etc/ssh/ssh_host_* /tmp/ - # Remove old metadata - rm -rf /var/lib/cloud + # Remove old metadata + rm -rf /var/lib/cloud - # Just log everything, quick&dirty - rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg + # Just log everything, quick&dirty + rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg - # Start: - /usr/local/etc/rc.d/cloudinit start + # Start: + /usr/local/etc/rc.d/cloudinit start - # Restore SSH keys - mv /tmp/ssh_host_* /etc/ssh/ + # Restore SSH keys + mv /tmp/ssh_host_* /etc/ssh/ fi -- cgit v1.2.3 From e1795a5cd1cb216e5514f55bdb1cddc605f40df3 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 19 Jun 2019 17:16:08 +0000 Subject: cloud-init-generator: use libexec path to ds-identify on redhat systems Update the template to use libexec prefix path to ds-identify on redhat systems. LP: #1833264 --- systemd/cloud-init-generator.tmpl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index cfa5eb53..45efa243 100755 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -82,7 +82,12 @@ default() { } check_for_datasource() { - local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" + local ds_rc="" +{% if variant in ["redhat", "fedora", "centos"] %} + local dsidentify="/usr/libexec/cloud-init/ds-identify" +{% else %} + local dsidentify="/usr/lib/cloud-init/ds-identify" +{% endif %} if [ ! -x "$dsidentify" ]; then debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" return 0 -- cgit v1.2.3 From f57a77577dd708c7f57babf8cd63ec18134bf34a Mon Sep 17 00:00:00 2001 From: Penghui Liao Date: Fri, 21 Jun 2019 19:41:43 +0000 Subject: sysconfig: support more bonding options Currently, only a few bonding parameters can be configured on sysconfig systems. This patch aims to support more parameters documented on the docs site. 
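The diff below extends the renderer's table of (config key, format string) pairs; each key present in the interface config is formatted and the results are joined into a single BONDING_OPTS value, as the expected test output shows. A minimal sketch of that translation, assuming a plain dict of interface settings (simplified; this is not the renderer's actual API):

    # Simplified sketch of BONDING_OPTS rendering. The real renderer in
    # cloudinit/net/sysconfig.py supports many more keys than shown here.
    BOND_TPL_OPTS = [
        ('bond_mode', 'mode=%s'),
        ('bond_xmit_hash_policy', 'xmit_hash_policy=%s'),
        ('bond_miimon', 'miimon=%s'),
    ]

    def render_bonding_opts(iface_cfg):
        # Format each configured bond option, preserving table order.
        opts = [tpl % iface_cfg[key]
                for key, tpl in BOND_TPL_OPTS if key in iface_cfg]
        return 'BONDING_OPTS="%s"' % ' '.join(opts)

    # render_bonding_opts({'bond_mode': 'active-backup', 'bond_miimon': 100})
    # -> 'BONDING_OPTS="mode=active-backup miimon=100"'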
--- cloudinit/net/sysconfig.py | 12 +++++++++ tests/unittests/test_net.py | 60 +++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index a47da0a8..be5dede7 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -284,6 +284,18 @@ class Renderer(renderer.Renderer): ('bond_mode', "mode=%s"), ('bond_xmit_hash_policy', "xmit_hash_policy=%s"), ('bond_miimon', "miimon=%s"), + ('bond_min_links', "min_links=%s"), + ('bond_arp_interval', "arp_interval=%s"), + ('bond_arp_ip_target', "arp_ip_target=%s"), + ('bond_arp_validate', "arp_validate=%s"), + ('bond_ad_select', "ad_select=%s"), + ('bond_num_grat_arp', "num_grat_arp=%s"), + ('bond_downdelay', "downdelay=%s"), + ('bond_updelay', "updelay=%s"), + ('bond_lacp_rate', "lacp_rate=%s"), + ('bond_fail_over_mac', "fail_over_mac=%s"), + ('bond_primary', "primary=%s"), + ('bond_primary_reselect', "primary_reselect=%s"), ]) bridge_opts_keys = tuple([ diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b936bc9c..18efce98 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1540,6 +1540,12 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true bond-mode: active-backup bond_miimon: 100 bond-xmit-hash-policy: "layer3+4" + bond-num-grat-arp: 5 + bond-downdelay: 10 + bond-updelay: 20 + bond-fail-over-mac: active + bond-primary: bond0s0 + bond-primary-reselect: always subnets: - type: static address: 192.168.0.2/24 @@ -1586,9 +1592,15 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true macaddress: aa:bb:cc:dd:e8:ff mtu: 9000 parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitious-arp: 5 mii-monitor-interval: 100 mode: active-backup + primary: bond0s0 + primary-reselect-policy: always transmit-hash-policy: layer3+4 + up-delay: 20 routes: - to: 10.1.3.0/24 via: 192.168.0.3 @@ -1604,15 +1616,27 @@ iface lo inet loopback auto bond0s0 iface bond0s0 inet manual + bond-downdelay 10 + bond-fail-over-mac active bond-master bond0 bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 bond-xmit-hash-policy layer3+4 bond_miimon 100 auto bond0s1 iface bond0s1 inet manual + bond-downdelay 10 + bond-fail-over-mac active bond-master bond0 bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always + bond-updelay 20 bond-xmit-hash-policy layer3+4 bond_miimon 100 @@ -1620,8 +1644,14 @@ auto bond0 iface bond0 inet static address 192.168.0.2/24 gateway 192.168.0.1 + bond-downdelay 10 + bond-fail-over-mac active bond-mode active-backup + bond-num-grat-arp 5 + bond-primary bond0s0 + bond-primary-reselect always bond-slaves none + bond-updelay 20 bond-xmit-hash-policy layer3+4 bond_miimon 100 hwaddress aa:bb:cc:dd:e8:ff @@ -1666,10 +1696,15 @@ iface bond0 inet6 static - eth0 - vf0 parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitious-arp: 5 mii-monitor-interval: 100 mode: active-backup - primary: vf0 - transmit-hash-policy: "layer3+4" + primary: bond0s0 + primary-reselect-policy: always + transmit-hash-policy: layer3+4 + up-delay: 20 routes: - to: 10.1.3.0/24 via: 192.168.0.3 @@ -1692,10 +1727,15 @@ iface bond0 inet6 static - eth0 - vf0 parameters: + down-delay: 10 + fail-over-mac-policy: active + gratuitious-arp: 5 mii-monitor-interval: 100 mode: active-backup - primary: vf0 + primary: bond0s0 + primary-reselect-policy: always 
transmit-hash-policy: layer3+4 + up-delay: 20 routes: - to: 10.1.3.0/24 via: 192.168.0.3 @@ -1720,7 +1760,12 @@ iface bond0 inet6 static 'expected_sysconfig_opensuse': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" BONDING_SLAVE0=bond0s0 BONDING_SLAVE1=bond0s1 BOOTPROTO=none @@ -1776,7 +1821,12 @@ iface bond0 inet6 static 'expected_sysconfig_rhel': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 miimon=100" + BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + """miimon=100 num_grat_arp=5 """ + """downdelay=10 updelay=20 """ + """fail_over_mac=active """ + """primary=bond0s0 """ + """primary_reselect=always" BONDING_SLAVE0=bond0s0 BONDING_SLAVE1=bond0s1 BOOTPROTO=none -- cgit v1.2.3 From feebec1cbb462208003460d68d909e76cb68e0e2 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 25 Jun 2019 16:06:27 +0000 Subject: azure: add region and AZ properties from imds compute location metadata This allows cloud-init query region to show valid region data for Azure --- cloudinit/sources/DataSourceAzure.py | 9 +++++ tests/unittests/test_datasource/test_azure.py | 47 +++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 7 deletions(-) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b7440c1d..d2fad9bb 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -683,6 +683,11 @@ class DataSourceAzure(sources.DataSource): DS_CFG_KEY_PRESERVE_NTFS, False)) return + @property + def availability_zone(self): + return self.metadata.get( + 'imds', {}).get('compute', {}).get('platformFaultDomain') + @property def network_config(self): """Generate a network config like net.generate_fallback_network() with @@ -701,6 +706,10 @@ class DataSourceAzure(sources.DataSource): self._network_config = parse_network_config(nc_src) return self._network_config + @property + def region(self): + return self.metadata.get('imds', {}).get('compute', {}).get('location') + def _partitions_on_device(devpath, maxnum=16): # return a list of tuples (ptnum, path) for each part on devpath diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index afb614e4..f27ef21b 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -84,6 +84,25 @@ def construct_valid_ovf_env(data=None, pubkeys=None, NETWORK_METADATA = { + "compute": { + "location": "eastus2", + "name": "my-hostname", + "offer": "UbuntuServer", + "osType": "Linux", + "placementGroupId": "", + "platformFaultDomain": "0", + "platformUpdateDomain": "0", + "publisher": "Canonical", + "resourceGroupName": "srugroup1", + "sku": "19.04-DAILY", + "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777", + "tags": "", + "version": "19.04.201906190", + "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642", + "vmScaleSetName": "", + "vmSize": "Standard_DS1_v2", + "zone": "" + }, "network": { "interface": [ { @@ -478,13 +497,7 @@ scbus-1 on xpt0 bus 0 expected_metadata = { 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, - 'imds': {'network': {'interface': [{ - 'ipv4': {'ipAddress': [ - 
{'privateIpAddress': '10.0.0.4', - 'publicIpAddress': '104.46.124.81'}], - 'subnet': [{'address': '10.0.0.0', 'prefix': '24'}]}, - 'ipv6': {'ipAddress': []}, - 'macAddress': '000D3A047598'}]}}, + 'imds': NETWORK_METADATA, 'instance-id': 'test-instance-id', 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -612,6 +625,26 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) + def test_availability_zone_set_from_imds(self): + """Datasource.availability returns IMDS platformFaultDomain.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('0', dsrc.availability_zone) + + def test_region_set_from_imds(self): + """Datasource.region returns IMDS region location.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual('eastus2', dsrc.region) + def test_user_cfg_set_agent_command(self): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} -- cgit v1.2.3 From b993b0a308b2e8182beef965ff57e9a7974f5e17 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 25 Jun 2019 20:41:42 +0000 Subject: doc: indicate that netplan is default in Ubuntu now --- doc/rtd/topics/network-config-format-v2.rst | 2 +- doc/rtd/topics/network-config.rst | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index ea370ef5..50f5fa61 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -14,7 +14,7 @@ it must include ``version: 2`` and one or more of possible device Cloud-init will read this format from system config. For example the following could be present in -``/etc/cloud/cloud.cfg.d/custom-networking.cfg``: +``/etc/cloud/cloud.cfg.d/custom-networking.cfg``:: network: version: 2 diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 1e994551..51ced4d1 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -163,10 +163,11 @@ found in Ubuntu and Debian. - **Netplan** -Since Ubuntu 16.10, codename Yakkety, the ``netplan`` project has been an -optional network configuration tool which consumes :ref:`network_config_v2` -input and renders network configuration for supported backends such as -``systemd-networkd`` and ``NetworkManager``. +Introduced in Ubuntu 16.10 (Yakkety Yak), `netplan `_ has +been the default network configuration tool in Ubuntu since 17.10 (Artful +Aardvark). netplan consumes :ref:`network_config_v2` input and renders +network configuration for supported backends such as ``systemd-networkd`` and +``NetworkManager``. 
- **Sysconfig** -- cgit v1.2.3 From 5e4792cd11a4754384bd70e4dc94413976017ae8 Mon Sep 17 00:00:00 2001 From: Markus Schade Date: Wed, 3 Jul 2019 17:04:46 +0000 Subject: Add missing dsname for Hetzner Cloud datasource --- cloudinit/sources/DataSourceHetzner.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 5c75b65b..50298330 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -28,6 +28,9 @@ MD_WAIT_RETRY = 2 class DataSourceHetzner(sources.DataSource): + + dsname = 'Hetzner' + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.distro = distro -- cgit v1.2.3 From 217c89369c3b16f11333f0090e059f76fc5d7937 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 10 Jul 2019 23:17:12 +0000 Subject: Fix a couple of issues raised by a coverity scan * cc_lxd: fix copy/paste error in debug logging * DataSourceCloudSigma: remove unreachable code * This unreachable code was introduced in a refactor (in 2015) which removed the need for an exception handler, but retained the logging from the exception handler as an unreachable fall-through. --- cloudinit/config/cc_lxd.py | 2 +- cloudinit/sources/DataSourceCloudSigma.py | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 71d13ed8..d9830770 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -152,7 +152,7 @@ def handle(name, cfg, cloud, log, args): if cmd_attach: log.debug("Setting up default lxd bridge: %s" % - " ".join(cmd_create)) + " ".join(cmd_attach)) _lxc(cmd_attach) elif bridge_cfg: diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 2955d3f0..df88f677 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -42,12 +42,8 @@ class DataSourceCloudSigma(sources.DataSource): if not sys_product_name: LOG.debug("system-product-name not available in dmi data") return False - else: - LOG.debug("detected hypervisor as %s", sys_product_name) - return 'cloudsigma' in sys_product_name.lower() - - LOG.warning("failed to query dmi data for system product name") - return False + LOG.debug("detected hypervisor as %s", sys_product_name) + return 'cloudsigma' in sys_product_name.lower() def _get_data(self): """ -- cgit v1.2.3 From e5f542132568a8da63823f478cea52bfb28ac655 Mon Sep 17 00:00:00 2001 From: Stanislav Makar Date: Mon, 15 Jul 2019 20:12:31 +0000 Subject: net: skip bond interfaces in get_interfaces bonds may inherit mac address from a physical interface LP: #1812857 --- cloudinit/net/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 3642fb1f..e758006f 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -622,6 +622,8 @@ def get_interfaces(): continue if is_vlan(name): continue + if is_bond(name): + continue mac = get_interface_mac(name) # some devices may not have a mac (tun0) if not mac: -- cgit v1.2.3 From a6faf3acef02bd8cd4d46ac9efeebf24b3f21d81 Mon Sep 17 00:00:00 2001 From: Janos Lenart Date: Mon, 15 Jul 2019 21:26:50 +0000 Subject: Update debian eni network configuration location, retain Ubuntu setting On Debian, ifupdown uses `source-directory /etc/network/interfaces.d` (for new installs) to include files. 
https://salsa.debian.org/debian/ifupdown/blob/master/debian/postinst#L23 The current filename, 50-cloud-init.cfg, does not match against the RE that is used to scan the directory for configurations (ASCII upper- and lower-case letters, ASCII digits, ASCII underscores, and ASCII minus-hyphens): https://salsa.debian.org/debian/ifupdown/blob/master/interfaces.5.pre#L122 Of course many installations use `source /etc/network/interfaces.d/*`, but not all. --- cloudinit/distros/debian.py | 4 ++-- cloudinit/distros/ubuntu.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index d517fb88..0ad93ffe 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -36,14 +36,14 @@ ENI_HEADER = """# This file is generated from information provided by # network: {config: disabled} """ -NETWORK_CONF_FN = "/etc/network/interfaces.d/50-cloud-init.cfg" +NETWORK_CONF_FN = "/etc/network/interfaces.d/50-cloud-init" LOCALE_CONF_FN = "/etc/default/locale" class Distro(distros.Distro): hostname_conf_fn = "/etc/hostname" network_conf_fn = { - "eni": "/etc/network/interfaces.d/50-cloud-init.cfg", + "eni": "/etc/network/interfaces.d/50-cloud-init", "netplan": "/etc/netplan/50-cloud-init.yaml" } renderer_configs = { diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index 68154104..e5fcbc58 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -21,6 +21,21 @@ LOG = logging.getLogger(__name__) class Distro(debian.Distro): + def __init__(self, name, cfg, paths): + super(Distro, self).__init__(name, cfg, paths) + # Ubuntu specific network cfg locations + self.network_conf_fn = { + "eni": "/etc/network/interfaces.d/50-cloud-init.cfg", + "netplan": "/etc/netplan/50-cloud-init.yaml" + } + self.renderer_configs = { + "eni": {"eni_path": self.network_conf_fn["eni"], + "eni_header": debian.ENI_HEADER}, + "netplan": {"netplan_path": self.network_conf_fn["netplan"], + "netplan_header": debian.ENI_HEADER, + "postcmds": True} + } + @property def preferred_ntp_clients(self): """The preferred ntp client is dependent on the version.""" -- cgit v1.2.3 From a24550aee4c7282cd3624bf63f9501444e517678 Mon Sep 17 00:00:00 2001 From: Sam Gilson Date: Mon, 15 Jul 2019 21:50:33 +0000 Subject: Cloud-init analyze module: Added ability to analyze boot events. This branch introduces a new command line feature for cloud-init. Currently, the cloud-init module has the capability to analyze events in cloud-init.log in three ways: 'show', 'blame', 'dump'. These changes add a fourth capability, called 'boot'. Running the command 'cloud-init analyze boot' will provide the user with three timestamps. 1) Timestamp for when the kernel starts initializing. 2) Timestamp for when the kernel finishes its initialization. 3) Timestamp for when systemd activates cloud-init. This feature enables cloud-init users to analyze different boot phases. This would aid in debugging performance issues related to cloud-init startup or tracking regressions.
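For a concrete sense of the first of those timestamps: a kernel-start time can be approximated as the current time minus system uptime. The sketch below is illustrative only and is not the code this commit adds; the analyzer also has to locate the kernel-finished and systemd-activation events to report all three values.

    # Illustrative approximation of the kernel-start timestamp: subtract
    # uptime (the first field of /proc/uptime) from the current time.
    import time

    def approx_kernel_start():
        with open('/proc/uptime') as f:
            uptime_seconds = float(f.read().split()[0])
        return time.time() - uptime_seconds

    # Example: time.asctime(time.localtime(approx_kernel_start()))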
--- cloudinit/analyze/__main__.py | 88 ++++++++++++++- cloudinit/analyze/show.py | 202 +++++++++++++++++++++++++++++++++-- cloudinit/analyze/tests/test_boot.py | 170 +++++++++++++++++++++++++++++ doc/rtd/topics/analyze.rst | 84 +++++++++++++++ doc/rtd/topics/capabilities.rst | 1 + doc/rtd/topics/debugging.rst | 13 +++ 6 files changed, 546 insertions(+), 12 deletions(-) create mode 100644 cloudinit/analyze/tests/test_boot.py create mode 100644 doc/rtd/topics/analyze.rst diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index f8613656..99e5c203 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -7,7 +7,7 @@ import re import sys from cloudinit.util import json_dumps - +from datetime import datetime from . import dump from . import show @@ -52,9 +52,93 @@ def get_parser(parser=None): dest='outfile', default='-', help='specify where to write output. ') parser_dump.set_defaults(action=('dump', analyze_dump)) + parser_boot = subparsers.add_parser( + 'boot', help='Print list of boot times for kernel and cloud-init') + parser_boot.add_argument('-i', '--infile', action='store', + dest='infile', default='/var/log/cloud-init.log', + help='specify where to read input. ') + parser_boot.add_argument('-o', '--outfile', action='store', + dest='outfile', default='-', + help='specify where to write output.') + parser_boot.set_defaults(action=('boot', analyze_boot)) return parser +def analyze_boot(name, args): + """Report a list of how long different boot operations took. + + For Example: + -- Most Recent Boot Record -- + Kernel Started at: