From 9d7d54dc42a272cc2b506a310d59281cd40e5210 Mon Sep 17 00:00:00 2001 From: Stanislas Date: Tue, 1 Sep 2020 21:57:18 +0200 Subject: network-config-format-v1: fix typo in nameserver example (#564) --- doc/rtd/topics/network-config-format-v1.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'doc/rtd') diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst index 9723d689..dfbde514 100644 --- a/doc/rtd/topics/network-config-format-v1.rst +++ b/doc/rtd/topics/network-config-format-v1.rst @@ -332,7 +332,7 @@ the following keys: - type: static address: 192.168.23.14/27 gateway: 192.168.23.1 - - type: nameserver: + - type: nameserver address: - 192.168.23.2 - 8.8.8.8 -- cgit v1.2.3 From e56b55452549cb037da0a4165154ffa494e9678a Mon Sep 17 00:00:00 2001 From: Thomas Stringer Date: Thu, 10 Sep 2020 14:29:54 -0400 Subject: Retrieve SSH keys from IMDS first with OVF as a fallback (#509) * pull ssh keys from imds first and fall back to ovf if unavailable * refactor log and diagnostic messages * refactor the OpenSSLManager instantiation and certificate usage * fix unit test where exception was being silenced for generate cert * fix tests now that certificate is not always generated * add documentation for ssh key retrieval * add ability to check if http client has security enabled * refactor certificate logic to GoalState --- cloudinit/sources/DataSourceAzure.py | 53 +++++++++++++++++- cloudinit/sources/helpers/azure.py | 50 ++++++++++++----- doc/rtd/topics/datasources/azure.rst | 6 ++ tests/unittests/test_datasource/test_azure.py | 64 ++++++++++++++++++---- .../unittests/test_datasource/test_azure_helper.py | 13 +++-- 5 files changed, 156 insertions(+), 30 deletions(-) (limited to 'doc/rtd') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index f3c6452b..e98fd497 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -561,6 +561,40 @@ class DataSourceAzure(sources.DataSource): def device_name_to_device(self, name): return self.ds_cfg['disk_aliases'].get(name) + @azure_ds_telemetry_reporter + def get_public_ssh_keys(self): + """ + Try to get the ssh keys from IMDS first, and if that fails + (i.e. IMDS is unavailable) then fallback to getting the ssh + keys from OVF. + + The benefit to getting keys from IMDS is a large performance + advantage, so this is a strong preference. But we must keep + OVF as a second option for environments that don't have IMDS. 
+ """ + LOG.debug('Retrieving public SSH keys') + ssh_keys = [] + try: + ssh_keys = [ + public_key['keyData'] + for public_key + in self.metadata['imds']['compute']['publicKeys'] + ] + LOG.debug('Retrieved SSH keys from IMDS') + except KeyError: + log_msg = 'Unable to get keys from IMDS, falling back to OVF' + LOG.debug(log_msg) + report_diagnostic_event(log_msg) + try: + ssh_keys = self.metadata['public-keys'] + LOG.debug('Retrieved keys from OVF') + except KeyError: + log_msg = 'No keys available from OVF' + LOG.debug(log_msg) + report_diagnostic_event(log_msg) + + return ssh_keys + def get_config_obj(self): return self.cfg @@ -764,7 +798,22 @@ class DataSourceAzure(sources.DataSource): if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: self.bounce_network_with_azure_hostname() - pubkey_info = self.cfg.get('_pubkeys', None) + pubkey_info = None + try: + public_keys = self.metadata['imds']['compute']['publicKeys'] + LOG.debug( + 'Successfully retrieved %s key(s) from IMDS', + len(public_keys) + if public_keys is not None + else 0 + ) + except KeyError: + LOG.debug( + 'Unable to retrieve SSH keys from IMDS during ' + 'negotiation, falling back to OVF' + ) + pubkey_info = self.cfg.get('_pubkeys', None) + metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. dhclient_lease_file, @@ -1443,7 +1492,7 @@ def get_metadata_from_imds(fallback_nic, retries): @azure_ds_telemetry_reporter def _get_metadata_from_imds(retries): - url = IMDS_URL + "instance?api-version=2017-12-01" + url = IMDS_URL + "instance?api-version=2019-06-01" headers = {"Metadata": "true"} try: response = readurl( diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 507f6ac8..79445a81 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -288,12 +288,16 @@ class InvalidGoalStateXMLException(Exception): class GoalState: - def __init__(self, unparsed_xml: str, - azure_endpoint_client: AzureEndpointHttpClient) -> None: + def __init__( + self, + unparsed_xml: str, + azure_endpoint_client: AzureEndpointHttpClient, + need_certificate: bool = True) -> None: """Parses a GoalState XML string and returns a GoalState object. @param unparsed_xml: string representing a GoalState XML. - @param azure_endpoint_client: instance of AzureEndpointHttpClient + @param azure_endpoint_client: instance of AzureEndpointHttpClient. + @param need_certificate: switch to know if certificates is needed. @return: GoalState object representing the GoalState XML string. """ self.azure_endpoint_client = azure_endpoint_client @@ -322,7 +326,7 @@ class GoalState: url = self._text_from_xpath( './Container/RoleInstanceList/RoleInstance' '/Configuration/Certificates') - if url is not None: + if url is not None and need_certificate: with events.ReportEventStack( name="get-certificates-xml", description="get certificates xml", @@ -741,27 +745,38 @@ class WALinuxAgentShim: GoalState. @return: The list of user's authorized pubkey values. 
""" - if self.openssl_manager is None: + http_client_certificate = None + if self.openssl_manager is None and pubkey_info is not None: self.openssl_manager = OpenSSLManager() + http_client_certificate = self.openssl_manager.certificate if self.azure_endpoint_client is None: self.azure_endpoint_client = AzureEndpointHttpClient( - self.openssl_manager.certificate) - goal_state = self._fetch_goal_state_from_azure() - ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info) + http_client_certificate) + goal_state = self._fetch_goal_state_from_azure( + need_certificate=http_client_certificate is not None + ) + ssh_keys = None + if pubkey_info is not None: + ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info) health_reporter = GoalStateHealthReporter( goal_state, self.azure_endpoint_client, self.endpoint) health_reporter.send_ready_signal() return {'public-keys': ssh_keys} @azure_ds_telemetry_reporter - def _fetch_goal_state_from_azure(self) -> GoalState: + def _fetch_goal_state_from_azure( + self, + need_certificate: bool) -> GoalState: """Fetches the GoalState XML from the Azure endpoint, parses the XML, and returns a GoalState object. @return: GoalState object representing the GoalState XML """ unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure() - return self._parse_raw_goal_state_xml(unparsed_goal_state_xml) + return self._parse_raw_goal_state_xml( + unparsed_goal_state_xml, + need_certificate + ) @azure_ds_telemetry_reporter def _get_raw_goal_state_xml_from_azure(self) -> str: @@ -774,7 +789,11 @@ class WALinuxAgentShim: LOG.info('Registering with Azure...') url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint) try: - response = self.azure_endpoint_client.get(url) + with events.ReportEventStack( + name="goalstate-retrieval", + description="retrieve goalstate", + parent=azure_ds_reporter): + response = self.azure_endpoint_client.get(url) except Exception as e: msg = 'failed to register with Azure: %s' % e LOG.warning(msg) @@ -785,7 +804,9 @@ class WALinuxAgentShim: @azure_ds_telemetry_reporter def _parse_raw_goal_state_xml( - self, unparsed_goal_state_xml: str) -> GoalState: + self, + unparsed_goal_state_xml: str, + need_certificate: bool) -> GoalState: """Parses a GoalState XML string and returns a GoalState object. @param unparsed_goal_state_xml: GoalState XML string @@ -793,7 +814,10 @@ class WALinuxAgentShim: """ try: goal_state = GoalState( - unparsed_goal_state_xml, self.azure_endpoint_client) + unparsed_goal_state_xml, + self.azure_endpoint_client, + need_certificate + ) except Exception as e: msg = 'Error processing GoalState XML: %s' % e LOG.warning(msg) diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index fdb919a5..e04c3a33 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -68,6 +68,12 @@ configuration information to the instance. Cloud-init uses the IMDS for: - network configuration for the instance which is applied per boot - a preprovisioing gate which blocks instance configuration until Azure fabric is ready to provision +- retrieving SSH public keys. Cloud-init will first try to utilize SSH keys + returned from IMDS, and if they are not provided from IMDS then it will + fallback to using the OVF file provided from the CD-ROM. 
 
 Configuration
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 47e03bd1..2dda9925 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -102,7 +102,13 @@ NETWORK_METADATA = {
         "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
         "vmScaleSetName": "",
         "vmSize": "Standard_DS1_v2",
-        "zone": ""
+        "zone": "",
+        "publicKeys": [
+            {
+                "keyData": "key1",
+                "path": "path1"
+            }
+        ]
     },
     "network": {
         "interface": [
@@ -302,7 +308,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase):

     def setUp(self):
         super(TestGetMetadataFromIMDS, self).setUp()
-        self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2017-12-01"
+        self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01"

     @mock.patch(MOCKPATH + 'readurl')
     @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
@@ -1304,6 +1310,40 @@ scbus-1 on xpt0 bus 0
             dsaz.get_hostname(hostname_command=("hostname",))
         m_subp.assert_called_once_with(("hostname",), capture=True)

+    @mock.patch(
+        'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
+    def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+        dsrc = self._get_ds(data)
+        dsrc.get_data()
+        dsrc.setup(True)
+        ssh_keys = dsrc.get_public_ssh_keys()
+        self.assertEqual(ssh_keys, ['key1'])
+        self.assertEqual(m_parse_certificates.call_count, 0)
+
+    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    def test_get_public_ssh_keys_without_imds(
+            self,
+            m_get_metadata_from_imds):
+        m_get_metadata_from_imds.return_value = dict()
+        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        data = {
+            'ovfcontent': construct_valid_ovf_env(data=odata),
+            'sys_cfg': sys_cfg
+        }
+        dsrc = self._get_ds(data)
+        dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']}
+        dsrc.get_data()
+        dsrc.setup(True)
+        ssh_keys = dsrc.get_public_ssh_keys()
+        self.assertEqual(ssh_keys, ['key2'])
+

 class TestAzureBounce(CiTestCase):

@@ -2094,14 +2134,18 @@ class TestAzureDataSourcePreprovisioning(CiTestCase):
         md, _ud, cfg, _d = dsa._reprovision()
         self.assertEqual(md['local-hostname'], hostname)
         self.assertEqual(cfg['system_info']['default_user']['name'], username)
-        self.assertEqual(fake_resp.call_args_list,
-                         [mock.call(allow_redirects=True,
-                                    headers={'Metadata': 'true',
-                                             'User-Agent':
-                                             'Cloud-Init/%s' % vs()},
-                                    method='GET',
-                                    timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
-                                    url=full_url)])
+        self.assertIn(
+            mock.call(
+                allow_redirects=True,
+                headers={
+                    'Metadata': 'true',
+                    'User-Agent': 'Cloud-Init/%s' % vs()
+                },
+                method='GET',
+                timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS,
+                url=full_url
+            ),
+            fake_resp.call_args_list)
         self.assertEqual(m_dhcp.call_count, 2)
         m_net.assert_any_call(
             broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 5e6d3d2d..5c31b8be 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ 
-609,11 +609,11 @@ class TestWALinuxAgentShim(CiTestCase): self.GoalState.return_value.container_id = self.test_container_id self.GoalState.return_value.instance_id = self.test_instance_id - def test_azure_endpoint_client_uses_certificate_during_report_ready(self): + def test_http_client_does_not_use_certificate(self): shim = wa_shim() shim.register_with_azure_and_fetch_data() self.assertEqual( - [mock.call(self.OpenSSLManager.return_value.certificate)], + [mock.call(None)], self.AzureEndpointHttpClient.call_args_list) def test_correct_url_used_for_goalstate_during_report_ready(self): @@ -625,8 +625,11 @@ class TestWALinuxAgentShim(CiTestCase): [mock.call('http://test_endpoint/machine/?comp=goalstate')], get.call_args_list) self.assertEqual( - [mock.call(get.return_value.contents, - self.AzureEndpointHttpClient.return_value)], + [mock.call( + get.return_value.contents, + self.AzureEndpointHttpClient.return_value, + False + )], self.GoalState.call_args_list) def test_certificates_used_to_determine_public_keys(self): @@ -701,7 +704,7 @@ class TestWALinuxAgentShim(CiTestCase): shim.register_with_azure_and_fetch_data() shim.clean_up() self.assertEqual( - 1, self.OpenSSLManager.return_value.clean_up.call_count) + 0, self.OpenSSLManager.return_value.clean_up.call_count) def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self): self.AzureEndpointHttpClient.return_value.get \ -- cgit v1.2.3 From a87ccadca114696bbca1e9d681efc8ce8955f454 Mon Sep 17 00:00:00 2001 From: Mina Galić Date: Wed, 16 Sep 2020 21:19:57 +0200 Subject: opennebula.rst: minor readability improvements (#573) - use `sh` as highlight language of the code block - change order so that the confusing indentation is less confusing --- doc/rtd/topics/datasources/opennebula.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'doc/rtd') diff --git a/doc/rtd/topics/datasources/opennebula.rst b/doc/rtd/topics/datasources/opennebula.rst index 8e7c2558..350a3e93 100644 --- a/doc/rtd/topics/datasources/opennebula.rst +++ b/doc/rtd/topics/datasources/opennebula.rst @@ -122,13 +122,13 @@ OpenNebula datasource only in 'net' mode. Example VM's context section ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -:: +.. code-block:: sh CONTEXT=[ - PUBLIC_IP="$NIC[IP]", SSH_KEY="$USER[SSH_KEY] $USER[SSH_KEY1] - $USER[SSH_KEY2] ", + $USER[SSH_KEY2]", + PUBLIC_IP="$NIC[IP]", USER_DATA="#cloud-config # see https://help.ubuntu.com/community/CloudInit -- cgit v1.2.3 From 5fc34d81a002f6ca0706f5285ee15b919c3d8d2e Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 16 Sep 2020 16:49:34 -0400 Subject: boot.rst: add First Boot Determination section (#568) LP: #1888858 --- doc/rtd/topics/boot.rst | 86 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) (limited to 'doc/rtd') diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst index 4e79c958..a5282e35 100644 --- a/doc/rtd/topics/boot.rst +++ b/doc/rtd/topics/boot.rst @@ -157,4 +157,90 @@ finished, the ``cloud-init status`` subcommand can help block external scripts until cloud-init is done without having to write your own systemd units dependency chains. See :ref:`cli_status` for more info. +First Boot Determination +************************ + +cloud-init has to determine whether or not the current boot is the first boot +of a new instance or not, so that it applies the appropriate configuration. On +an instance's first boot, it should run all "per-instance" configuration, +whereas on a subsequent boot it should run only "per-boot" configuration. 
This +section describes how cloud-init performs this determination, as well as why it +is necessary. + +When it runs, cloud-init stores a cache of its internal state for use across +stages and boots. + +If this cache is present, then cloud-init has run on this system before. +[#not-present]_ There are two cases where this could occur. Most commonly, +the instance has been rebooted, and this is a second/subsequent boot. +Alternatively, the filesystem has been attached to a *new* instance, and this +is an instance's first boot. The most obvious case where this happens is when +an instance is launched from an image captured from a launched instance. + +By default, cloud-init attempts to determine which case it is running in by +checking the instance ID in the cache against the instance ID it determines at +runtime. If they do not match, then this is an instance's first boot; +otherwise, it's a subsequent boot. Internally, cloud-init refers to this +behavior as ``check``. + +This behavior is required for images captured from launched instances to +behave correctly, and so is the default which generic cloud images ship with. +However, there are cases where it can cause problems. [#problems]_ For these +cases, cloud-init has support for modifying its behavior to trust the instance +ID that is present in the system unconditionally. This means that cloud-init +will never detect a new instance when the cache is present, and it follows that +the only way to cause cloud-init to detect a new instance (and therefore its +first boot) is to manually remove cloud-init's cache. Internally, this +behavior is referred to as ``trust``. + +To configure which of these behaviors to use, cloud-init exposes the +``manual_cache_clean`` configuration option. When ``false`` (the default), +cloud-init will ``check`` and clean the cache if the instance IDs do not match +(this is the default, as discussed above). When ``true``, cloud-init will +``trust`` the existing cache (and therefore not clean it). + +Manual Cache Cleaning +===================== + +cloud-init ships a command for manually cleaning the cache: ``cloud-init +clean``. See :ref:`cli_clean`'s documentation for further details. + +Reverting ``manual_cache_clean`` Setting +======================================== + +Currently there is no support for switching an instance that is launched with +``manual_cache_clean: true`` from ``trust`` behavior to ``check`` behavior, +other than manually cleaning the cache. + +.. warning:: If you want to capture an instance that is currently in ``trust`` + mode as an image for launching other instances, you **must** manually clean + the cache. If you do not do so, then instances launched from the captured + image will all detect their first boot as a subsequent boot of the captured + instance, and will not apply any per-instance configuration. + + This is a functional issue, but also a potential security one: cloud-init is + responsible for rotating SSH host keys on first boot, and this will not + happen on these instances. + +.. [#not-present] It follows that if this cache is not present, cloud-init has + not run on this system before, so this is unambiguously this instance's + first boot. + +.. 
[#problems] A couple of ways in which this strict reliance on the presence + of a datasource has been observed to cause problems: + + * If a cloud's metadata service is flaky and cloud-init cannot obtain the + instance ID locally on that platform, cloud-init's instance ID + determination will sometimes fail to determine the current instance ID, + which makes it impossible to determine if this is an instance's first or + subsequent boot (`#1885527`_). + * If cloud-init is used to provision a physical appliance or device and an + attacker can present a datasource to the device with a different instance + ID, then cloud-init's default behavior will detect this as an instance's + first boot and reset the device using the attacker's configuration + (this has been observed with the NoCloud datasource in `#1879530`_). + +.. _#1885527: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1885527 +.. _#1879530: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/1879530 + .. vi: textwidth=79 -- cgit v1.2.3 From 82ffc53273927bfc8d71e7f0c858753552d85cf1 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 1 Oct 2020 15:32:35 -0500 Subject: Initial implementation of integration testing infrastructure (#581) --- .gitignore | 3 + HACKING.rst | 11 +- doc/rtd/index.rst | 3 +- doc/rtd/topics/cloud_tests.rst | 761 ++++++++++++++++++++++++ doc/rtd/topics/integration_tests.rst | 81 +++ doc/rtd/topics/tests.rst | 758 ----------------------- integration-requirements.txt | 2 + tests/integration_tests/conftest.py | 106 ++++ tests/integration_tests/integration_settings.py | 95 +++ tests/integration_tests/platforms.py | 235 ++++++++ tox.ini | 7 + 11 files changed, 1302 insertions(+), 760 deletions(-) create mode 100644 doc/rtd/topics/cloud_tests.rst create mode 100644 doc/rtd/topics/integration_tests.rst delete mode 100644 doc/rtd/topics/tests.rst create mode 100644 tests/integration_tests/conftest.py create mode 100644 tests/integration_tests/integration_settings.py create mode 100644 tests/integration_tests/platforms.py (limited to 'doc/rtd') diff --git a/.gitignore b/.gitignore index 3589b210..5a68bff9 100644 --- a/.gitignore +++ b/.gitignore @@ -27,3 +27,6 @@ cloud-init_*.dsc cloud-init_*.orig.tar.gz cloud-init_*.tar.xz cloud-init_*.upload + +# user test settings +tests/integration_tests/user_settings.py diff --git a/HACKING.rst b/HACKING.rst index 60c7b5e0..4ae7f7b4 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -173,9 +173,18 @@ Cloud Config Modules * Any new modules should use underscores in any new config options and not hyphens (e.g. `new_option` and *not* `new-option`). -Unit Testing +.. _unit_testing: + +Testing ------------ +cloud-init has both unit tests and integration tests. Unit tests can +be found in-tree alongside the source code, as well as +at ``tests/unittests``. Integration tests can be found at +``tests/integration_tests``. Documentation specifically for integration +tests can be found on the :ref:`integration_tests` page, but +the guidelines specified below apply to both types of tests. + cloud-init uses `pytest`_ to run its tests, and has tests written both as ``unittest.TestCase`` sub-classes and as un-subclassed pytest tests. The following guidelines should be followed: diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index 0015e35a..ddcb0b31 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -75,6 +75,7 @@ Having trouble? We would like to help! 
   topics/dir_layout.rst
   topics/analyze.rst
   topics/docs.rst
-   topics/tests.rst
+   topics/integration_tests.rst
+   topics/cloud_tests.rst
 
 .. vi: textwidth=79
diff --git a/doc/rtd/topics/cloud_tests.rst b/doc/rtd/topics/cloud_tests.rst
new file mode 100644
index 00000000..e4e893d2
--- /dev/null
+++ b/doc/rtd/topics/cloud_tests.rst
@@ -0,0 +1,761 @@
+************************
+Cloud tests (Deprecated)
+************************
+
+Cloud tests are no longer maintained. For writing integration
+tests, see the :ref:`integration_tests` page.
+
+Overview
+========
+
+This page describes the execution, development, and architecture of the
+cloud-init integration tests:
+
+* Execution explains the options available and running of tests
+* Development shows how to write test cases
+* Architecture explains the internal processes
+
+Execution
+=========
+
+Overview
+--------
+
+In order to avoid the need for dependencies and ease the setup and
+configuration users can run the integration tests via tox:
+
+.. code-block:: shell-session
+
+    $ git clone https://github.com/canonical/cloud-init
+    $ cd cloud-init
+    $ tox -e citest -- -h
+
+Everything after the double dash will be passed to the integration tests.
+Executing tests has several options:
+
+* ``run`` an alias to run both ``collect`` and ``verify``. The ``tree_run``
+  command does the same thing, except uses a deb built from the current
+  working tree.
+
+* ``collect`` deploys on the specified platform and distro, patches with the
+  requested deb or rpm, and finally collects output of the arbitrary
+  commands. Similarly, ``tree_collect`` will collect output using a deb
+  built from the current working tree.
+
+* ``verify`` given a directory of test data, runs the Python unit tests on
+  it to generate results.
+
+* ``bddeb`` will build a deb of the current working tree.
+
+Run
+---
+
+The first example will provide a complete end-to-end run of data
+collection and verification. There are additional examples below
+explaining how to run one or the other independently.
+
+.. code-block:: shell-session
+
+    $ git clone https://github.com/canonical/cloud-init
+    $ cd cloud-init
+    $ tox -e citest -- run --verbose \
+        --os-name stretch --os-name xenial \
+        --deb cloud-init_0.7.8~my_patch_all.deb \
+        --preserve-data --data-dir ~/collection \
+        --preserve-instance
+
+The above command will do the following:
+
+* ``run`` both collect output and run tests on the output
+
+* ``--verbose`` verbose output
+
+* ``--os-name stretch`` on the Debian Stretch release
+
+* ``--os-name xenial`` on the Ubuntu Xenial release
+
+* ``--deb cloud-init_0.7.8~patch_all.deb`` use this deb as the version of
+  cloud-init to run with
+
+* ``--preserve-data`` always preserve collected data, do not remove data
+  after successful test run
+
+* ``--preserve-instance`` do not destroy the instance after test to allow
+  for debugging the stopped instance during integration test development. By
+  default, test instances are destroyed after the test completes.
+
+* ``--data-dir ~/collection`` write collected data into `~/collection`,
+  rather than using a temporary directory
+
+For a more detailed explanation of each option see below.
+
+.. note::
+   By default, data collected by the run command will be written into a
+   temporary directory and deleted after a successful run. If you would
+   like to preserve this data, please use the option ``--preserve-data``.
+
+Collect
+-------
+
+If developing tests it may be necessary to see if cloud-config works as
+expected and the correct files are pulled down. In this case only a
+collect can be run by running:
+
+.. code-block:: shell-session
+
+    $ tox -e citest -- collect -n xenial --data-dir /tmp/collection
+
+The above command will run the collection tests on xenial and place
+all results into `/tmp/collection`.
+
+Verify
+------
+
+When developing tests it is much easier to simply rerun the verify scripts
+without the more lengthy collect process. This can be done by running:
+
+.. code-block:: shell-session
+
+    $ tox -e citest -- verify --data-dir /tmp/collection
+
+The above command will run the verify scripts on the data discovered in
+`/tmp/collection`.
+
+TreeRun and TreeCollect
+-----------------------
+
+If working on a cloud-init feature or resolving a bug, it may be useful to
+run the current copy of cloud-init in the integration testing environment.
+The integration testing suite can automatically build a deb based on the
+current working tree of cloud-init and run the test suite using this deb.
+
+The ``tree_run`` and ``tree_collect`` commands take the same arguments as
+the ``run`` and ``collect`` commands. These commands will build a deb and
+write it into a temporary file, then start the test suite and pass that deb
+in. To build a deb only, and not run the test suite, the ``bddeb`` command
+can be used.
+
+Note that code in the cloud-init working tree that has not been committed
+when the cloud-init deb is built will still be included. To build a
+cloud-init deb from, or use the ``tree_run`` command with, a copy of
+cloud-init located in a different directory, use the option ``--cloud-init
+/path/to/cloud-init``.
+
+.. code-block:: shell-session
+
+    $ tox -e citest -- tree_run --verbose \
+        --os-name xenial --os-name stretch \
+        --test modules/final_message --test modules/write_files \
+        --result /tmp/result.yaml
+
+Bddeb
+-----
+
+The ``bddeb`` command can be used to generate a deb file. This is used by
+the tree_run and tree_collect commands to build a deb of the current
+working tree. It can also be used by a user to generate a deb for use in
+other situations and avoid needing to have all the build and test
+dependencies installed locally.
+
+* ``--bddeb-args``: arguments to pass through to bddeb
+* ``--build-os``: distribution to use as build system (default is xenial)
+* ``--build-platform``: platform to use for build system (default is lxd)
+* ``--cloud-init``: path to base of cloud-init tree (default is '.')
+* ``--deb``: path to write output deb to (default is '.')
+
+Setup Image
+-----------
+
+By default an image that is used will remain unmodified, but certain
+scenarios may require image modification. For example, many images may use
+a much older cloud-init. As a result tests looking at newer functionality
+will fail because a newer version of cloud-init may be required. The
+following options can be used for further customization:
+
+* ``--deb``: install the specified deb into the image
+* ``--rpm``: install the specified rpm into the image
+* ``--repo``: enable a repository and upgrade cloud-init afterwards
+* ``--ppa``: enable a ppa and upgrade cloud-init afterwards
+* ``--upgrade``: upgrade cloud-init from repos
+* ``--upgrade-full``: run a full system upgrade
+* ``--script``: execute a script in the image. This can perform any setup
+  required that is not covered by the other options
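+
+For example, a hypothetical invocation that upgrades the image from a PPA
+before collecting data might look like this (the PPA name here is
+illustrative only):
+
+.. code-block:: shell-session
+
+    $ tox -e citest -- run --verbose \
+        --os-name xenial \
+        --ppa ppa:cloud-init-dev/daily \
+        --data-dir /tmp/collection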
+
+Test Case Development
+=====================
+
+Overview
+--------
+
+As a test writer you need to develop a test configuration and a
+verification file:
+
+ * The test configuration specifies a specific cloud-config to be used by
+   cloud-init and a list of arbitrary commands to capture the output of
+   (e.g. my_test.yaml)
+
+ * The verification file runs tests on the collected output to determine
+   the result of the test (e.g. my_test.py)
+
+The names must match, however the extensions will of course be different,
+yaml vs py.
+
+Configuration
+-------------
+
+The test configuration is a YAML file such as *ntp_server.yaml* below:
+
+.. code-block:: yaml
+
+    #
+    # Empty NTP config to setup using defaults
+    #
+    # NOTE: this should not require apt feature, use 'which' rather than 'dpkg -l'
+    # NOTE: this should not require no_ntpdate feature, use 'which' to check for
+    #       installation rather than 'dpkg -l', as 'grep ntp' matches 'ntpdate'
+    # NOTE: the verifier should check for any ntp server not 'ubuntu.pool.ntp.org'
+    cloud_config: |
+      #cloud-config
+      ntp:
+        servers:
+          - pool.ntp.org
+    required_features:
+      - apt
+      - no_ntpdate
+      - ubuntu_ntp
+    collect_scripts:
+      ntp_installed_servers: |
+        #!/bin/bash
+        dpkg -l | grep ntp | wc -l
+      ntp_conf_dist_servers: |
+        #!/bin/bash
+        ls /etc/ntp.conf.dist | wc -l
+      ntp_conf_servers: |
+        #!/bin/bash
+        cat /etc/ntp.conf | grep '^server'
+
+There are several keys, 1 required and some optional, in the YAML file:
+
+1. The required key is ``cloud_config``. This should be a string of valid
+   YAML that is exactly what would normally be placed in a cloud-config
+   file, including the cloud-config header. This essentially sets up the
+   scenario under test.
+
+2. One optional key is ``collect_scripts``. This key has one or more
+   sub-keys containing strings of arbitrary commands to execute (e.g.
+   ``cat /var/log/cloud-config-output.log``). In the example above the
+   output of dpkg is captured, grepped for ntp, and the number of lines
+   reported. The name of the sub-key is important. The sub-key is used by
+   the verification script to recall the output of the commands run.
+
+3. The optional ``enabled`` key enables or disables the test case. By
+   default the test case will be enabled.
+
+4. The optional ``required_features`` key may be used to specify a list
+   of feature flags that an image must have to be able to run the test
+   case. For example, if a test case relies on an image supporting apt,
+   then the config for the test case should include ``required_features:
+   [ apt ]``.
+
+
+Default Collect Scripts
+-----------------------
+
+By default the following files will be collected for every test. There is
+no need to specify these items:
+
+* ``/var/log/cloud-init.log``
+* ``/var/log/cloud-init-output.log``
+* ``/run/cloud-init/.instance-id``
+* ``/run/cloud-init/result.json``
+* ``/run/cloud-init/status.json``
+* ``dpkg-query -W -f='${Version}' cloud-init``
+
+Verification
+------------
+
+The verification script is a Python file with unit tests like the one,
+`ntp_server.py`, below:
+
+.. code-block:: python
+
+    # This file is part of cloud-init. See LICENSE file for license information.
+ + """cloud-init Integration Test Verify Script""" + from tests.cloud_tests.testcases import base + + + class TestNtp(base.CloudTestCase): + """Test ntp module""" + + def test_ntp_installed(self): + """Test ntp installed""" + out = self.get_data_file('ntp_installed_empty') + self.assertEqual(1, int(out)) + + def test_ntp_dist_entries(self): + """Test dist config file has one entry""" + out = self.get_data_file('ntp_conf_dist_empty') + self.assertEqual(1, int(out)) + + def test_ntp_entires(self): + """Test config entries""" + out = self.get_data_file('ntp_conf_empty') + self.assertIn('pool 0.ubuntu.pool.ntp.org iburst', out) + self.assertIn('pool 1.ubuntu.pool.ntp.org iburst', out) + self.assertIn('pool 2.ubuntu.pool.ntp.org iburst', out) + self.assertIn('pool 3.ubuntu.pool.ntp.org iburst', out) + + # vi: ts=4 expandtab + + +Here is a breakdown of the unit test file: + +* The import statement allows access to the output files. + +* The class can be named anything, but must import the + ``base.CloudTestCase``, either directly or via another test class. + +* There can be 1 to N number of functions with any name, however only + functions starting with ``test_*`` will be executed. + +* There can be 1 to N number of classes in a test module, however only + classes inheriting from ``base.CloudTestCase`` will be loaded. + +* Output from the commands can be accessed via + ``self.get_data_file('key')`` where key is the sub-key of + ``collect_scripts`` above. + +* The cloud config that the test ran with can be accessed via + ``self.cloud_config``, or any entry from the cloud config can be accessed + via ``self.get_config_entry('key')``. + +* See the base ``CloudTestCase`` for additional helper functions. + +Layout +------ + +Integration tests are located under the `tests/cloud_tests` directory. +Test configurations are placed under `configs` and the test verification +scripts under `testcases`: + +.. code-block:: shell-session + + cloud-init$ tree -d tests/cloud_tests/ + tests/cloud_tests/ + ├── configs + │   ├── bugs + │   ├── examples + │   ├── main + │   └── modules + └── testcases + ├── bugs + ├── examples + ├── main + └── modules + +The sub-folders of bugs, examples, main, and modules help organize the +tests. View the README.md in each to understand in more detail each +directory. + +Test Creation Helper +-------------------- + +The integration testing suite has a built in helper to aid in test +development. Help can be invoked via ``tox -e citest -- create --help``. It +can create a template test case config file with user data passed in from +the command line, as well as a template test case verifier module. + +The following would create a test case named ``example`` under the +``modules`` category with the given description, and cloud config data read +in from ``/tmp/user_data``. + +.. code-block:: shell-session + + $ tox -e citest -- create modules/example \ + -d "a simple example test case" -c "$(< /tmp/user_data)" + + +Development Checklist +--------------------- + +* Configuration File + * Named 'your_test.yaml' + * Contains at least a valid cloud-config + * Optionally, commands to capture additional output + * Valid YAML + * Placed in the appropriate sub-folder in the configs directory + * Any image features required for the test are specified +* Verification File + * Named 'your_test.py' + * Valid unit tests validating output collected + * Passes pylint & pep8 checks + * Placed in the appropriate sub-folder in the test cases directory +* Tested by running the test: + + .. 
code-block:: shell-session

      $ tox -e citest -- run --verbose \
          --os-name <release target> \
          --test modules/your_test.yaml \
          [--deb <build deb>]
+
+
+Platforms
+=========
+
+EC2
+---
+To run on the EC2 platform it is required that the user has an AWS credentials
+configuration file specifying his or her access keys and a default region.
+These configuration files are the standard that the AWS cli and other AWS
+tools utilize for interacting directly with AWS itself and are normally
+generated when running ``aws configure``:
+
+.. code-block:: shell-session
+
+    $ cat $HOME/.aws/credentials
+    [default]
+    aws_access_key_id = <KEY HERE>
+    aws_secret_access_key = <KEY HERE>
+
+.. code-block:: shell-session
+
+    $ cat $HOME/.aws/config
+    [default]
+    region = us-west-2
+
+
+Azure Cloud
+-----------
+
+To run on the Azure Cloud platform, users log in with a Service Principal
+and export a credentials file. The region is defaulted and can be set in
+``tests/cloud_tests/platforms.yaml``. The Service Principal credentials are
+the standard authentication for the Azure SDK to interact with Azure
+services:
+
+Create Service Principal account or login
+
+.. code-block:: shell-session
+
+    $ az ad sp create-for-rbac --name "APP_ID" --password "STRONG-SECRET-PASSWORD"
+
+.. code-block:: shell-session
+
+    $ az login --service-principal --username "APP_ID" --password "STRONG-SECRET-PASSWORD"
+
+Export credentials
+
+.. code-block:: shell-session
+
+    $ az ad sp create-for-rbac --sdk-auth > $HOME/.azure/credentials.json
+
+.. code-block:: json
+
+    {
+        "clientId": "",
+        "clientSecret": "",
+        "subscriptionId": "",
+        "tenantId": "",
+        "activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
+        "resourceManagerEndpointUrl": "https://management.azure.com/",
+        "activeDirectoryGraphResourceId": "https://graph.windows.net/",
+        "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
+        "galleryEndpointUrl": "https://gallery.azure.com/",
+        "managementEndpointUrl": "https://management.core.windows.net/"
+    }
+
+Set region in platforms.yaml
+
+.. code-block:: yaml
+
+    azurecloud:
+        enabled: true
+        region: West US 2
+        vm_size: Standard_DS1_v2
+        storage_sku: standard_lrs
+        tag: ci
+
+
+Architecture
+============
+
+The following section outlines the high-level architecture of the
+integration process.
+
+Overview
+--------
+The process flow during a complete end-to-end LXD-backed test.
+
+1. Configuration
+   * The back end and specific distro releases are verified as supported
+   * The test or tests that need to be run are determined either by
+     directory or by individual yaml
+
+2. Image Creation
+   * Acquire the requested LXD image
+   * Install the specified cloud-init package
+   * Clean the image so that it does not appear to have been booted
+   * A snapshot of the image is created and reused by all tests
+
+3. Configuration
+   * For each test, the cloud-config is injected into a copy of the
+     snapshot and booted
+   * The framework waits for ``/var/lib/cloud/instance/boot-finished``
+     (up to 120 seconds)
+   * All default commands are run and output collected
+   * Any commands the user specified are executed and output collected
+
+4. Verification
+   * The default commands are checked for any failures, errors, and
+     warnings to validate that basic functionality of cloud-init completed
+     successfully
+   * The user-generated unit tests are then run, validating against the
+     collected output
+
+5. 
Results + * If any failures were detected the test suite returns a failure + * Results can be dumped in yaml format to a specified file using the + ``-r .yaml`` option + +Configuring the Test Suite +-------------------------- + +Most of the behavior of the test suite is configurable through several yaml +files. These control the behavior of the test suite's platforms, images, and +tests. The main config files for platforms, images and test cases are +``platforms.yaml``, ``releases.yaml`` and ``testcases.yaml``. + +Config handling +^^^^^^^^^^^^^^^ + +All configurable parts of the test suite use a defaults + overrides system +for managing config entries. All base config items are dictionaries. + +Merging is done on a key-by-key basis, with all keys in the default and +override represented in the final result. If a key exists both in +the defaults and the overrides, then the behavior depends on the type of data +the key refers to. If it is atomic data or a list, then the overrides will +replace the default. If the data is a dictionary then the value will be the +result of merging that dictionary from the default config and that +dictionary from the overrides. + +Merging is done using the function +``tests.cloud_tests.config.merge_config``, which can be examined for more +detail on config merging behavior. + +The following demonstrates merge behavior: + +.. code-block:: yaml + + defaults: + list_item: + - list_entry_1 + - list_entry_2 + int_item_1: 123 + int_item_2: 234 + dict_item: + subkey_1: 1 + subkey_2: 2 + subkey_dict: + subsubkey_1: a + subsubkey_2: b + + overrides: + list_item: + - overridden_list_entry + int_item_1: 0 + dict_item: + subkey_2: false + subkey_dict: + subsubkey_2: 'new value' + + result: + list_item: + - overridden_list_entry + int_item_1: 0 + int_item_2: 234 + dict_item: + subkey_1: 1 + subkey_2: false + subkey_dict: + subsubkey_1: a + subsubkey_2: 'new value' + + +Image Config +------------ + +Image configuration is handled in ``releases.yaml``. The image configuration +controls how platforms locate and acquire images, how the platforms should +interact with the images, how platforms should detect when an image has +fully booted, any options that are required to set the image up, and +features that the image supports. + +Since settings for locating an image and interacting with it differ from +platform to platform, there are 4 levels of settings available for images on +top of the default image settings. The structure of the image config file +is: + +.. code-block:: yaml + + default_release_config: + default: + ... + : + ... + : + ... + + releases: + : + : + ... + : + ... + : + ... + + +The base config is created from the overall defaults and the overrides for +the platform. The overrides are created from the default config for the +image and the platform specific overrides for the image. + +System Boot +^^^^^^^^^^^ + +The test suite must be able to test if a system has fully booted and if +cloud-init has finished running, so that running collect scripts does not +race against the target image booting. This is done using the +``system_ready_script`` and ``cloud_init_ready_script`` image config keys. + +Each of these keys accepts a small bash test statement as a string that must +return 0 or 1. Since this test statement will be added into a larger bash +statement it must be a single statement using the ``[`` test syntax. + +The default image config provides a system ready script that works for any +systemd based image. 
If the image is not systemd based, then a different
+test statement must be provided. The default config also provides a test
+for whether or not cloud-init has finished which checks for the file
+``/run/cloud-init/result.json``. This should be sufficient for most systems
+as writing this file is one of the last things cloud-init does.
+
+The setting ``boot_timeout`` controls how long, in seconds, the platform
+should wait for an image to boot. If the system ready script has not
+indicated that the system is fully booted within this time an error will be
+raised.
+
+Feature Flags
+^^^^^^^^^^^^^
+
+Not all test cases can work on all images due to features the test case
+requires not being present on that image. If a test case requires features
+in an image that are not likely to be present across all distros and
+platforms that the test suite supports, then the test can be skipped
+everywhere it is not supported.
+
+Feature flags are names for features that are supported on some images, but
+not all, and that may be required by test cases. Configuration for feature
+flags is provided in ``releases.yaml`` under the ``features`` top level key.
+The features config includes a list of all currently defined feature flags,
+their meanings, and a list of feature groups.
+
+Feature groups are groups of features that many images have in common. For
+example, the ``Ubuntu_specific`` feature group includes features that
+should be present across most Ubuntu releases, but may or may not be for
+other distros. Feature groups are specified for an image as a list under
+the key ``feature_groups``.
+
+An image's feature flags are derived from the feature groups that that
+image has and any feature overrides provided. Feature overrides can be
+specified under the ``features`` key which accepts a dictionary of
+``{<feature flag>: true/false}`` mappings. If a feature is omitted from an
+image's feature flags or set to false in the overrides then the test suite
+will skip any tests that require that feature when using that image.
+
+Feature flags may be overridden at run time using the ``--feature-override``
+command line argument. It accepts a feature flag and value to set in the
+format ``<feature flag>=true/false``. Multiple ``--feature-override``
+flags can be used, and will all be applied to all feature flags for images
+used during a test.
+
+Setup Overrides
+^^^^^^^^^^^^^^^
+
+If an image requires some of the options for image setup to be used, then it
+may specify overrides for the command line arguments passed into setup
+image. These may be specified as a dictionary under the ``setup_overrides``
+key. When an image is set up, the arguments that control how it is set up
+will be the arguments from the command line, with any entries in
+``setup_overrides`` used to override these arguments.
+
+For example, images that do not come with cloud-init already installed
+should have ``setup_overrides: {upgrade: true}`` specified so that in the
+event that no additional setup options are given, cloud-init will be
+installed from the image's repos before running tests. Note that if other
+options such as ``--deb`` are passed in on the command line, these will
+still work as expected, since apt's policy for cloud-init would prefer the
+locally installed deb over an older version from the repos.
+
+Platform Specific Options
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are many platform specific options in image configuration that allow
+platforms to locate images and that control additional setup that the
+platform may have to do to make the image usable. 
For information on how +these work, please consult the documentation for that platform in the +integration testing suite and the ``releases.yaml`` file for examples. + +Error Handling +-------------- + +The test suite makes an attempt to run as many tests as possible even in the +event of some failing so that automated runs collect as much data as +possible. In the event that something goes wrong while setting up for or +running a test, the test suite will attempt to continue running any tests +which have not been affected by the error. + +For example, if the test suite was told to run tests on one platform for two +releases and an error occurred setting up the first image, all tests for +that image would be skipped, and the test suite would continue to set up +the second image and run tests on it. Or, if the system does not start +properly for one test case out of many to run on that image, that test case +will be skipped and the next one will be run. + +Note that if any errors occur, the test suite will record the failure and +where it occurred in the result data and write it out to the specified +result file. + +Results +------- + +The test suite generates result data that includes how long each stage of +the test suite took and which parts were and were not successful. This data +is dumped to the log after the collect and verify stages, and may also be +written out in yaml format to a file. If part of the setup failed, the +traceback for the failure and the error message will be included in the +result file. If a test verifier finds a problem with the collected data +from a test run, the class, test function and test will be recorded in the +result data. + +Exit Codes +^^^^^^^^^^ + +The test suite counts how many errors occur throughout a run. The exit code +after a run is the number of errors that occurred. If the exit code is +non-zero then something is wrong either with the test suite, the +configuration for an image, a test case, or cloud-init itself. + +Note that the exit code does not always directly correspond to the number +of failed test cases, since in some cases, a single error during image setup +can mean that several test cases are not run. If run is used, then the exit +code will be the sum of the number of errors in the collect and verify +stages. + +Data Dir +^^^^^^^^ + +When using run, the collected data is written into a temporary directory. In +the event that all tests pass, this directory is deleted, but if a test +fails or an error occurs, this data will be left in place, and a message +will be written to the log giving the location of the data. diff --git a/doc/rtd/topics/integration_tests.rst b/doc/rtd/topics/integration_tests.rst new file mode 100644 index 00000000..aeda326c --- /dev/null +++ b/doc/rtd/topics/integration_tests.rst @@ -0,0 +1,81 @@ +.. _integration_tests: + +******************* +Integration Testing +******************* + +Overview +========= + +Integration tests are written using pytest and are located at +``tests/integration_tests``. General design principles +laid out in :ref:`unit_testing` should be followed for integration tests. + +Setup is accomplished via a set of fixtures located in +``tests/integration_tests/conftest.py``. + +Image Setup +=========== + +Image setup occurs once when a test session begins and is implemented +via fixture. 
Image setup roughly follows these steps: + +* Launch an instance on the specified test platform +* Install the version of cloud-init under test +* Run ``cloud-init clean`` on the instance so subsequent boots + resemble out of the box behavior +* Take a snapshot of the instance to be used as a new image from + which new instances can be launched + +Test Setup +============== +Test setup occurs between image setup and test execution. Test setup +is implemented via one of the ``client`` fixtures. When a client fixture +is used, a test instance from which to run tests is launched prior to +test execution and torn down after. + +Test Definition +=============== +Tests are defined like any other pytest test. The ``user_data`` +mark can be used to supply the cloud-config user data. Platform specific +marks can be used to limit tests to particular platforms. The +client fixture can be used to interact with the launched +test instance. + +A basic example: + +.. code-block:: python + + USER_DATA = """#cloud-config + bootcmd: + - echo 'hello config!' > /tmp/user_data.txt""" + + + class TestSimple: + @pytest.mark.user_data(USER_DATA) + @pytest.mark.ec2 + def test_simple(self, client): + print(client.exec('cloud-init -v')) + +Test Execution +============== +Test execution happens via pytest. To run all integration tests, +you would run: + +.. code-block:: bash + + pytest tests/integration_tests/ + + +Configuration +============= + +All possible configuration values are defined in +``tests/integration_tests/integration_settings.py``. Defaults can be +overridden by supplying values in ``tests/integration_tests/user_settings.py`` +or by providing an environment variable of the same name prepended with +``CLOUD_INIT_``. For example, to set the ``PLATFORM`` setting: + +.. code-block:: bash + + CLOUD_INIT_PLATFORM='ec2' pytest tests/integration_tests/ diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst deleted file mode 100644 index f03b5969..00000000 --- a/doc/rtd/topics/tests.rst +++ /dev/null @@ -1,758 +0,0 @@ -******************* -Integration Testing -******************* - -Overview -======== - -This page describes the execution, development, and architecture of the -cloud-init integration tests: - -* Execution explains the options available and running of tests -* Development shows how to write test cases -* Architecture explains the internal processes - -Execution -========= - -Overview --------- - -In order to avoid the need for dependencies and ease the setup and -configuration users can run the integration tests via tox: - -.. code-block:: shell-session - - $ git clone https://github.com/canonical/cloud-init - $ cd cloud-init - $ tox -e citest -- -h - -Everything after the double dash will be passed to the integration tests. -Executing tests has several options: - -* ``run`` an alias to run both ``collect`` and ``verify``. The ``tree_run`` - command does the same thing, except uses a deb built from the current - working tree. - -* ``collect`` deploys on the specified platform and distro, patches with the - requested deb or rpm, and finally collects output of the arbitrary - commands. Similarly, ```tree_collect`` will collect output using a deb - built from the current working tree. - -* ``verify`` given a directory of test data, run the Python unit tests on - it to generate results. - -* ``bddeb`` will build a deb of the current working tree. - -Run ---- - -The first example will provide a complete end-to-end run of data -collection and verification. 
There are additional examples below -explaining how to run one or the other independently. - -.. code-block:: shell-session - - $ git clone https://github.com/canonical/cloud-init - $ cd cloud-init - $ tox -e citest -- run --verbose \ - --os-name stretch --os-name xenial \ - --deb cloud-init_0.7.8~my_patch_all.deb \ - --preserve-data --data-dir ~/collection \ - --preserve-instance - -The above command will do the following: - -* ``run`` both collect output and run tests the output - -* ``--verbose`` verbose output - -* ``--os-name stretch`` on the Debian Stretch release - -* ``--os-name xenial`` on the Ubuntu Xenial release - -* ``--deb cloud-init_0.7.8~patch_all.deb`` use this deb as the version of - cloud-init to run with - -* ``--preserve-data`` always preserve collected data, do not remove data - after successful test run - -* ``--preserve-instance`` do not destroy the instance after test to allow - for debugging the stopped instance during integration test development. By - default, test instances are destroyed after the test completes. - -* ``--data-dir ~/collection`` write collected data into `~/collection`, - rather than using a temporary directory - -For a more detailed explanation of each option see below. - -.. note:: - By default, data collected by the run command will be written into a - temporary directory and deleted after a successful. If you would - like to preserve this data, please use the option ``--preserve-data``. - -Collect -------- - -If developing tests it may be necessary to see if cloud-config works as -expected and the correct files are pulled down. In this case only a -collect can be ran by running: - -.. code-block:: shell-session - - $ tox -e citest -- collect -n xenial --data-dir /tmp/collection - -The above command will run the collection tests on xenial and place -all results into `/tmp/collection`. - -Verify ------- - -When developing tests it is much easier to simply rerun the verify scripts -without the more lengthy collect process. This can be done by running: - -.. code-block:: shell-session - - $ tox -e citest -- verify --data-dir /tmp/collection - -The above command will run the verify scripts on the data discovered in -`/tmp/collection`. - -TreeRun and TreeCollect ------------------------ - -If working on a cloud-init feature or resolving a bug, it may be useful to -run the current copy of cloud-init in the integration testing environment. -The integration testing suite can automatically build a deb based on the -current working tree of cloud-init and run the test suite using this deb. - -The ``tree_run`` and ``tree_collect`` commands take the same arguments as -the ``run`` and ``collect`` commands. These commands will build a deb and -write it into a temporary file, then start the test suite and pass that deb -in. To build a deb only, and not run the test suite, the ``bddeb`` command -can be used. - -Note that code in the cloud-init working tree that has not been committed -when the cloud-init deb is built will still be included. To build a -cloud-init deb from or use the ``tree_run`` command using a copy of -cloud-init located in a different directory, use the option ``--cloud-init -/path/to/cloud-init``. - -.. code-block:: shell-session - - $ tox -e citest -- tree_run --verbose \ - --os-name xenial --os-name stretch \ - --test modules/final_message --test modules/write_files \ - --result /tmp/result.yaml - -Bddeb ------ - -The ``bddeb`` command can be used to generate a deb file. 
-
-Setup Image
------------
-
-By default an image that is used will remain unmodified, but certain
-scenarios may require image modification. For example, many images may use
-a much older cloud-init. As a result, tests looking at newer functionality
-will fail because a newer version of cloud-init may be required. The
-following options can be used for further customization:
-
-* ``--deb``: install the specified deb into the image
-* ``--rpm``: install the specified rpm into the image
-* ``--repo``: enable a repository and upgrade cloud-init afterwards
-* ``--ppa``: enable a ppa and upgrade cloud-init afterwards
-* ``--upgrade``: upgrade cloud-init from repos
-* ``--upgrade-full``: run a full system upgrade
-* ``--script``: execute a script in the image. This can perform any setup
-  required that is not covered by the other options
-
-Test Case Development
-=====================
-
-Overview
---------
-
-As a test writer you need to develop a test configuration and a
-verification file:
-
- * The test configuration specifies a specific cloud-config to be used by
-   cloud-init and a list of arbitrary commands to capture the output of
-   (e.g. my_test.yaml)
-
- * The verification file runs tests on the collected output to determine
-   the result of the test (e.g. my_test.py)
-
-The names must match; however, the extensions will of course be different,
-yaml vs py.
-
-Configuration
--------------
-
-The test configuration is a YAML file such as *ntp_server.yaml* below:
-
-.. code-block:: yaml
-
-    #
-    # NTP config using a custom pool server
-    #
-    # NOTE: this should not require apt feature, use 'which' rather than 'dpkg -l'
-    # NOTE: this should not require no_ntpdate feature, use 'which' to check for
-    #       installation rather than 'dpkg -l', as 'grep ntp' matches 'ntpdate'
-    # NOTE: the verifier should check for any ntp server not 'ubuntu.pool.ntp.org'
-    cloud_config: |
-      #cloud-config
-      ntp:
-        servers:
-          - pool.ntp.org
-    required_features:
-      - apt
-      - no_ntpdate
-      - ubuntu_ntp
-    collect_scripts:
-      ntp_installed_servers: |
-        #!/bin/bash
-        dpkg -l | grep ntp | wc -l
-      ntp_conf_dist_servers: |
-        #!/bin/bash
-        ls /etc/ntp.conf.dist | wc -l
-      ntp_conf_servers: |
-        #!/bin/bash
-        cat /etc/ntp.conf | grep '^server'
-
-There are several keys, one required and some optional, in the YAML file:
-
-1. The required key is ``cloud_config``. This should be a string of valid
-   YAML that is exactly what would normally be placed in a cloud-config
-   file, including the cloud-config header. This essentially sets up the
-   scenario under test.
-
-2. One optional key is ``collect_scripts``. This key has one or more
-   sub-keys containing strings of arbitrary commands to execute (e.g.
-   ``cat /var/log/cloud-init-output.log``). In the example above the
-   output of dpkg is captured, grepped for ntp, and the number of lines
-   reported. The name of the sub-key is important.
The sub-key is used by
-   the verification script to recall the output of the commands run.
-
-3. The optional ``enabled`` key enables or disables the test case. By
-   default the test case will be enabled.
-
-4. The optional ``required_features`` key may be used to specify a list
-   of feature flags that an image must have to be able to run the test
-   case. For example, if a test case relies on an image supporting apt,
-   then the config for the test case should include ``required_features:
-   [ apt ]``.
-
-
-Default Collect Scripts
------------------------
-
-By default the following files will be collected for every test. There is
-no need to specify these items:
-
-* ``/var/log/cloud-init.log``
-* ``/var/log/cloud-init-output.log``
-* ``/run/cloud-init/.instance-id``
-* ``/run/cloud-init/result.json``
-* ``/run/cloud-init/status.json``
-* ``dpkg-query -W -f='${Version}' cloud-init``
-
-Verification
-------------
-
-The verification script is a Python file with unit tests, like the
-`ntp_server.py` example below:
-
-.. code-block:: python
-
-    # This file is part of cloud-init. See LICENSE file for license information.
-
-    """cloud-init Integration Test Verify Script"""
-    from tests.cloud_tests.testcases import base
-
-
-    class TestNtp(base.CloudTestCase):
-        """Test ntp module"""
-
-        def test_ntp_installed(self):
-            """Test ntp installed"""
-            out = self.get_data_file('ntp_installed_servers')
-            self.assertEqual(1, int(out))
-
-        def test_ntp_dist_entries(self):
-            """Test dist config file has one entry"""
-            out = self.get_data_file('ntp_conf_dist_servers')
-            self.assertEqual(1, int(out))
-
-        def test_ntp_entries(self):
-            """Test config entries"""
-            out = self.get_data_file('ntp_conf_servers')
-            self.assertIn('server pool.ntp.org iburst', out)
-
-    # vi: ts=4 expandtab
-
-
-Here is a breakdown of the unit test file:
-
-* The import statement allows access to the output files.
-
-* The class can be named anything, but must inherit from
-  ``base.CloudTestCase``, either directly or via another test class.
-
-* There can be any number of functions with any name, however only
-  functions starting with ``test_*`` will be executed.
-
-* There can be any number of classes in a test module, however only
-  classes inheriting from ``base.CloudTestCase`` will be loaded.
-
-* Output from the commands can be accessed via
-  ``self.get_data_file('key')`` where key is the sub-key of
-  ``collect_scripts`` above.
-
-* The cloud config that the test ran with can be accessed via
-  ``self.cloud_config``, or any entry from the cloud config can be accessed
-  via ``self.get_config_entry('key')``.
-
-* See the base ``CloudTestCase`` for additional helper functions.
-
-Layout
-------
-
-Integration tests are located under the `tests/cloud_tests` directory.
-Test configurations are placed under `configs` and the test verification
-scripts under `testcases`:
-
-.. code-block:: shell-session
-
-    cloud-init$ tree -d tests/cloud_tests/
-    tests/cloud_tests/
-    ├── configs
-    │   ├── bugs
-    │   ├── examples
-    │   ├── main
-    │   └── modules
-    └── testcases
-        ├── bugs
-        ├── examples
-        ├── main
-        └── modules
-
-The sub-folders of bugs, examples, main, and modules help organize the
-tests. View the README.md in each to understand each directory in more
-detail.
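-
-Returning to the verification helpers described above, a minimal sketch
-using ``get_config_entry`` against the ``ntp_server.yaml`` config might
-look like the following (the expected values are taken from that config):
-
-.. code-block:: python
-
-    class TestNtpConfig(base.CloudTestCase):
-        """Sketch: validate against the cloud-config the test ran with"""
-
-        def test_ntp_servers_requested(self):
-            # get_config_entry returns the parsed value of a cloud-config key
-            ntp_cfg = self.get_config_entry('ntp')
-            self.assertEqual(['pool.ntp.org'], ntp_cfg.get('servers'))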
-
-Test Creation Helper
---------------------
-
-The integration testing suite has a built-in helper to aid in test
-development. Help can be invoked via ``tox -e citest -- create --help``. It
-can create a template test case config file with user data passed in from
-the command line, as well as a template test case verifier module.
-
-The following would create a test case named ``example`` under the
-``modules`` category with the given description, and cloud config data read
-in from ``/tmp/user_data``.
-
-.. code-block:: shell-session
-
-    $ tox -e citest -- create modules/example \
-        -d "a simple example test case" -c "$(< /tmp/user_data)"
-
-
-Development Checklist
----------------------
-
-* Configuration File
-    * Named 'your_test.yaml'
-    * Contains at least a valid cloud-config
-    * Optionally, commands to capture additional output
-    * Valid YAML
-    * Placed in the appropriate sub-folder in the configs directory
-    * Any image features required for the test are specified
-* Verification File
-    * Named 'your_test.py'
-    * Valid unit tests validating output collected
-    * Passes pylint & pep8 checks
-    * Placed in the appropriate sub-folder in the test cases directory
-* Tested by running the test:
-
-  .. code-block:: shell-session
-
-      $ tox -e citest -- run --verbose \
-          --os-name <release target> \
-          --test modules/your_test.yaml \
-          [--deb <build deb>]
-
-
-Platforms
-=========
-
-EC2
----
-To run on the EC2 platform it is required that the user has an AWS credentials
-configuration file specifying their access keys and a default region.
-These configuration files are the standard that the AWS cli and other AWS
-tools utilize for interacting directly with AWS itself and are normally
-generated when running ``aws configure``:
-
-.. code-block:: shell-session
-
-    $ cat $HOME/.aws/credentials
-    [default]
-    aws_access_key_id = <KEY HERE>
-    aws_secret_access_key = <KEY HERE>
-
-.. code-block:: shell-session
-
-    $ cat $HOME/.aws/config
-    [default]
-    region = us-west-2
-
-
-Azure Cloud
------------
-
-To run on the Azure Cloud platform, users log in with a Service Principal
-and export a credentials file. A default region is used; it can be changed
-in ``tests/cloud_tests/platforms.yaml``. The Service Principal credentials
-are the standard authentication for the Azure SDK to interact with Azure
-services:
-
-Create a Service Principal account or log in:
-
-.. code-block:: shell-session
-
-    $ az ad sp create-for-rbac --name "APP_ID" --password "STRONG-SECRET-PASSWORD"
-
-.. code-block:: shell-session
-
-    $ az login --service-principal --username "APP_ID" --password "STRONG-SECRET-PASSWORD"
-
-Export the credentials:
-
-.. code-block:: shell-session
-
-    $ az ad sp create-for-rbac --sdk-auth > $HOME/.azure/credentials.json
-
-.. code-block:: json
-
-    {
-        "clientId": "",
-        "clientSecret": "",
-        "subscriptionId": "",
-        "tenantId": "",
-        "activeDirectoryEndpointUrl": "https://login.microsoftonline.com",
-        "resourceManagerEndpointUrl": "https://management.azure.com/",
-        "activeDirectoryGraphResourceId": "https://graph.windows.net/",
-        "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/",
-        "galleryEndpointUrl": "https://gallery.azure.com/",
-        "managementEndpointUrl": "https://management.core.windows.net/"
-    }
-
-Set the region in platforms.yaml:
-
-.. code-block:: yaml
-
-    azurecloud:
-        enabled: true
-        region: West US 2
-        vm_size: Standard_DS1_v2
-        storage_sku: standard_lrs
-        tag: ci
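-
-To confirm that the Service Principal login succeeded before starting a
-run, a quick check such as the following can be used (assuming the Azure
-CLI is installed):
-
-.. code-block:: shell-session
-
-    $ az account show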
-
-
-Architecture
-============
-
-The following section outlines the high-level architecture of the
-integration process.
-
-Overview
---------
-The process flow during a complete end-to-end LXD-backed test is as
-follows:
-
-1. Configuration
-    * The back end and specific distro releases are verified as supported
-    * The test or tests that need to be run are determined either by
-      directory or by individual yaml
-
-2. Image Creation
-    * Acquire the requested LXD image
-    * Install the specified cloud-init package
-    * Clean the image so that it does not appear to have been booted
-    * A snapshot of the image is created and reused by all tests
-
-3. Collection
-    * For each test, the cloud-config is injected into a copy of the
-      snapshot and booted
-    * The framework waits for ``/var/lib/cloud/instance/boot-finished``
-      (up to 120 seconds)
-    * All default commands are run and output collected
-    * Any commands the user specified are executed and output collected
-
-4. Verification
-    * The default commands are checked for any failures, errors, and
-      warnings to validate that basic functionality of cloud-init
-      completed successfully
-    * The user generated unit tests are then run, validating against the
-      collected output
-
-5. Results
-    * If any failures were detected the test suite returns a failure
-    * Results can be dumped in yaml format to a specified file using the
-      ``-r <result_file_name>.yaml`` option
-
-Configuring the Test Suite
---------------------------
-
-Most of the behavior of the test suite is configurable through several yaml
-files. These control the behavior of the test suite's platforms, images, and
-tests. The main config files for platforms, images and test cases are
-``platforms.yaml``, ``releases.yaml`` and ``testcases.yaml``.
-
-Config handling
-^^^^^^^^^^^^^^^
-
-All configurable parts of the test suite use a defaults + overrides system
-for managing config entries. All base config items are dictionaries.
-
-Merging is done on a key-by-key basis, with all keys in the default and
-override represented in the final result. If a key exists both in
-the defaults and the overrides, then the behavior depends on the type of data
-the key refers to. If it is atomic data or a list, then the overrides will
-replace the default. If the data is a dictionary then the value will be the
-result of merging that dictionary from the default config and that
-dictionary from the overrides.
-
-Merging is done using the function
-``tests.cloud_tests.config.merge_config``, which can be examined for more
-detail on config merging behavior.
-
-The following demonstrates merge behavior:
-
-.. code-block:: yaml
-
-    defaults:
-        list_item:
-         - list_entry_1
-         - list_entry_2
-        int_item_1: 123
-        int_item_2: 234
-        dict_item:
-            subkey_1: 1
-            subkey_2: 2
-            subkey_dict:
-                subsubkey_1: a
-                subsubkey_2: b
-
-    overrides:
-        list_item:
-         - overridden_list_entry
-        int_item_1: 0
-        dict_item:
-            subkey_2: false
-            subkey_dict:
-                subsubkey_2: 'new value'
-
-    result:
-        list_item:
-         - overridden_list_entry
-        int_item_1: 0
-        int_item_2: 234
-        dict_item:
-            subkey_1: 1
-            subkey_2: false
-            subkey_dict:
-                subsubkey_1: a
-                subsubkey_2: 'new value'
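-
-The same merge can be exercised directly from Python; a minimal sketch of
-the behavior described above (assuming the suite is importable from a
-cloud-init checkout, and treating the exact signature as an assumption):
-
-.. code-block:: python
-
-    from tests.cloud_tests.config import merge_config
-
-    defaults = {
-        'list_item': ['list_entry_1', 'list_entry_2'],
-        'int_item_1': 123,
-        'dict_item': {'subkey_1': 1, 'subkey_2': 2},
-    }
-    overrides = {
-        'list_item': ['overridden_list_entry'],
-        'int_item_1': 0,
-        'dict_item': {'subkey_2': False},
-    }
-
-    merged = merge_config(defaults, overrides)
-    # lists and atoms are replaced wholesale; dicts merge key-by-key
-    assert merged['list_item'] == ['overridden_list_entry']
-    assert merged['dict_item'] == {'subkey_1': 1, 'subkey_2': False}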
-
-
-Image Config
-------------
-
-Image configuration is handled in ``releases.yaml``. The image configuration
-controls how platforms locate and acquire images, how the platforms should
-interact with the images, how platforms should detect when an image has
-fully booted, any options that are required to set the image up, and
-features that the image supports.
-
-Since settings for locating an image and interacting with it differ from
-platform to platform, there are 4 levels of settings available for images on
-top of the default image settings. The structure of the image config file
-is:
-
-.. code-block:: yaml
-
-    default_release_config:
-        default:
-            ...
-        <platform>:
-            ...
-        <platform>:
-            ...
-
-    releases:
-        <release name>:
-            <default>:
-            ...
-            <platform>:
-            ...
-            <platform>:
-            ...
-
-
-The base config is created from the overall defaults and the overrides for
-the platform. The overrides are created from the default config for the
-image and the platform specific overrides for the image.
-
-System Boot
-^^^^^^^^^^^
-
-The test suite must be able to test if a system has fully booted and if
-cloud-init has finished running, so that running collect scripts does not
-race against the target image booting. This is done using the
-``system_ready_script`` and ``cloud_init_ready_script`` image config keys.
-
-Each of these keys accepts a small bash test statement as a string that must
-return 0 or 1. Since this test statement will be added into a larger bash
-statement it must be a single statement using the ``[`` test syntax.
-
-The default image config provides a system ready script that works for any
-systemd based image. If the image is not systemd based, then a different
-test statement must be provided. The default config also provides a test
-for whether or not cloud-init has finished which checks for the file
-``/run/cloud-init/result.json``. This should be sufficient for most systems
-as writing this file is one of the last things cloud-init does.
-
-The setting ``boot_timeout`` controls how long, in seconds, the platform
-should wait for an image to boot. If the system ready script has not
-indicated that the system is fully booted within this time an error will be
-raised.
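-
-As an illustration, an image config could override the readiness checks
-and timeout like this (the script contents and timeout shown are
-assumptions, not the shipped defaults):
-
-.. code-block:: yaml
-
-    default_release_config:
-        default:
-            # a single [ ] test statement, per the constraint above
-            system_ready_script: |
-                [ -S /run/systemd/private ]
-            cloud_init_ready_script: |
-                [ -f /run/cloud-init/result.json ]
-            boot_timeout: 120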
-
-Feature Flags
-^^^^^^^^^^^^^
-
-Not all test cases can work on all images due to features the test case
-requires not being present on that image. If a test case requires features
-in an image that are not likely to be present across all distros and
-platforms that the test suite supports, then the test can be skipped
-everywhere it is not supported.
-
-Feature flags are names for features that are supported on some images, but
-not all, and that may be required by test cases. Configuration for feature
-flags is provided in ``releases.yaml`` under the ``features`` top level key.
-The features config includes a list of all currently defined feature flags,
-their meanings, and a list of feature groups.
-
-Feature groups are groups of features that many images have in common. For
-example, the ``Ubuntu_specific`` feature group includes features that
-should be present across most Ubuntu releases, but may or may not be for
-other distros. Feature groups are specified for an image as a list under
-the key ``feature_groups``.
-
-An image's feature flags are derived from the feature groups that the
-image has and any feature overrides provided. Feature overrides can be
-specified under the ``features`` key which accepts a dictionary of
-``{<feature name>: true/false}`` mappings. If a feature is omitted from an
-image's feature flags or set to false in the overrides then the test suite
-will skip any tests that require that feature when using that image.
-
-Feature flags may be overridden at run time using the ``--feature-override``
-command line argument. It accepts a feature flag and value to set in the
-format ``<feature name>=true/false``. Multiple ``--feature-override``
-flags can be used, and will all be applied to all feature flags for images
-used during a test.
-
-Setup Overrides
-^^^^^^^^^^^^^^^
-
-If an image requires some of the options for image setup to be used, then it
-may specify overrides for the command line arguments passed into setup
-image. These may be specified as a dictionary under the ``setup_overrides``
-key. When an image is set up, the arguments that control how it is set up
-will be the arguments from the command line, with any entries in
-``setup_overrides`` used to override these arguments.
-
-For example, images that do not come with cloud-init already installed
-should have ``setup_overrides: {upgrade: true}`` specified so that in the
-event that no additional setup options are given, cloud-init will be
-installed from the image's repos before running tests. Note that if other
-options such as ``--deb`` are passed in on the command line, these will
-still work as expected, since apt's policy for cloud-init would prefer the
-locally installed deb over an older version from the repos.
-
-Platform Specific Options
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-There are many platform specific options in image configuration that allow
-platforms to locate images and that control additional setup that the
-platform may have to do to make the image usable. For information on how
-these work, please consult the documentation for that platform in the
-integration testing suite and the ``releases.yaml`` file for examples.
-
-Error Handling
---------------
-
-The test suite makes an attempt to run as many tests as possible even in the
-event of some failing so that automated runs collect as much data as
-possible. In the event that something goes wrong while setting up for or
-running a test, the test suite will attempt to continue running any tests
-which have not been affected by the error.
-
-For example, if the test suite was told to run tests on one platform for two
-releases and an error occurred setting up the first image, all tests for
-that image would be skipped, and the test suite would continue to set up
-the second image and run tests on it. Or, if the system does not start
-properly for one test case out of many to run on that image, that test case
-will be skipped and the next one will be run.
-
-Note that if any errors occur, the test suite will record the failure and
-where it occurred in the result data and write it out to the specified
-result file.
-
-Results
--------
-
-The test suite generates result data that includes how long each stage of
-the test suite took and which parts were and were not successful. This data
-is dumped to the log after the collect and verify stages, and may also be
-written out in yaml format to a file. If part of the setup failed, the
-traceback for the failure and the error message will be included in the
-result file. If a test verifier finds a problem with the collected data
-from a test run, the class, test function and test will be recorded in the
-result data.
-
-Exit Codes
-^^^^^^^^^^
-
-The test suite counts how many errors occur throughout a run. The exit code
-after a run is the number of errors that occurred. If the exit code is
-non-zero then something is wrong either with the test suite, the
-configuration for an image, a test case, or cloud-init itself.
-
-Note that the exit code does not always directly correspond to the number
-of failed test cases, since in some cases, a single error during image setup
-can mean that several test cases are not run.
-
If run is used, then the exit -code will be the sum of the number of errors in the collect and verify -stages. - -Data Dir -^^^^^^^^ - -When using run, the collected data is written into a temporary directory. In -the event that all tests pass, this directory is deleted, but if a test -fails or an error occurs, this data will be left in place, and a message -will be written to the log giving the location of the data. diff --git a/integration-requirements.txt b/integration-requirements.txt index 13cfb9d7..64455c79 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -4,6 +4,8 @@ # Note: Changes to this requirements may require updates to # the packages/pkg-deps.json file as well. # +pytest +git+https://github.com/canonical/pycloudlib.git # ec2 backend boto3==1.14.53 diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py new file mode 100644 index 00000000..a170bfc9 --- /dev/null +++ b/tests/integration_tests/conftest.py @@ -0,0 +1,106 @@ +# This file is part of cloud-init. See LICENSE file for license information. +import os +import logging +import pytest +import sys +from contextlib import contextmanager + +from tests.integration_tests import integration_settings +from tests.integration_tests.platforms import ( + dynamic_client, + LxdContainerClient, + client_name_to_class +) + +log = logging.getLogger('integration_testing') +log.addHandler(logging.StreamHandler(sys.stdout)) +log.setLevel(logging.INFO) + + +def pytest_runtest_setup(item): + """Skip tests on unsupported clouds. + + A test can take any number of marks to specify the platforms it can + run on. If a platform(s) is specified and we're not running on that + platform, then skip the test. If platform specific marks are not + specified, then we assume the test can be run anywhere. + """ + all_platforms = client_name_to_class.keys() + supported_platforms = set(all_platforms).intersection( + mark.name for mark in item.iter_markers()) + current_platform = integration_settings.PLATFORM + if supported_platforms and current_platform not in supported_platforms: + pytest.skip('Cannot run on platform {}'.format(current_platform)) + + +# disable_subp_usage is defined at a higher level, but we don't +# want it applied here +@pytest.fixture() +def disable_subp_usage(request): + pass + + +@pytest.fixture(scope='session', autouse=True) +def setup_image(): + """Setup the target environment with the correct version of cloud-init. 
+ + So we can launch instances / run tests with the correct image + """ + client = dynamic_client() + log.info('Setting up environment for %s', client.datasource) + if integration_settings.CLOUD_INIT_SOURCE == 'NONE': + pass # that was easy + elif integration_settings.CLOUD_INIT_SOURCE == 'IN_PLACE': + if not isinstance(client, LxdContainerClient): + raise ValueError( + 'IN_PLACE as CLOUD_INIT_SOURCE only works for LXD') + # The mount needs to happen after the instance is launched, so + # no further action needed here + elif integration_settings.CLOUD_INIT_SOURCE == 'PROPOSED': + client.launch() + client.install_proposed_image() + elif integration_settings.CLOUD_INIT_SOURCE.startswith('ppa:'): + client.launch() + client.install_ppa(integration_settings.CLOUD_INIT_SOURCE) + elif os.path.isfile(str(integration_settings.CLOUD_INIT_SOURCE)): + client.launch() + client.install_deb() + if client.instance: + # Even if we're keeping instances, we don't want to keep this + # one around as it was just for image creation + client.destroy() + log.info('Done with environment setup') + + +@contextmanager +def _client(request, fixture_utils): + """Fixture implementation for the client fixtures. + + Launch the dynamic IntegrationClient instance using any provided + userdata, yield to the test, then cleanup + """ + user_data = fixture_utils.closest_marker_first_arg_or( + request, 'user_data', None) + with dynamic_client(user_data=user_data) as instance: + yield instance + + +@pytest.yield_fixture +def client(request, fixture_utils): + """Provide a client that runs for every test.""" + with _client(request, fixture_utils) as client: + yield client + + +@pytest.yield_fixture(scope='module') +def module_client(request, fixture_utils): + """Provide a client that runs once per module.""" + with _client(request, fixture_utils) as client: + yield client + + +@pytest.yield_fixture(scope='class') +def class_client(request, fixture_utils): + """Provide a client that runs once per class.""" + with _client(request, fixture_utils) as client: + yield client diff --git a/tests/integration_tests/integration_settings.py b/tests/integration_tests/integration_settings.py new file mode 100644 index 00000000..ddd587db --- /dev/null +++ b/tests/integration_tests/integration_settings.py @@ -0,0 +1,95 @@ +# This file is part of cloud-init. See LICENSE file for license information. +import os + +################################################################## +# LAUNCH SETTINGS +################################################################## + +# Keep instance (mostly for debugging) when test is finished +KEEP_INSTANCE = False + +# One of: +# lxd_container +# ec2 +# gce +# oci +PLATFORM = 'lxd_container' + +# The cloud-specific instance type to run. E.g., a1.medium on AWS +# If the pycloudlib instance provides a default, this can be left None +INSTANCE_TYPE = None + +# Determines the base image to use or generate new images from. +# Can be the name of the OS if running a stock image, +# otherwise the id of the image being used if using a custom image +OS_IMAGE = 'focal' + +# Populate if you want to use a pre-launched instance instead of +# creating a new one. 
The exact contents will be platform dependent +EXISTING_INSTANCE_ID = None + +################################################################## +# IMAGE GENERATION SETTINGS +################################################################## + +# Depending on where we are in the development / test / SRU cycle, we'll want +# different methods of getting the source code to our SUT. Because of +# this there are a number of different ways to initialize +# the target environment. + +# Can be any of the following: +# NONE +# Don't modify the target environment at all. This will run +# cloud-init with whatever code was baked into the image +# IN_PLACE +# LXD CONTAINER only. Mount the source code as-is directly into +# the container to override the pre-existing cloudinit module. This +# won't work for non-local LXD remotes and won't run any installation +# code. +# PROPOSED +# Install from the Ubuntu proposed repo +# , e.g., ppa:cloud-init-dev/proposed +# Install from a PPA. It MUST start with 'ppa:' +# +# A path to a valid package to be uploaded and installed +CLOUD_INIT_SOURCE = 'NONE' + +################################################################## +# GCE SPECIFIC SETTINGS +################################################################## +# Required for GCE +GCE_PROJECT = None + +# You probably want to override these +GCE_REGION = 'us-central1' +GCE_ZONE = 'a' + +################################################################## +# OCI SPECIFIC SETTINGS +################################################################## +# Compartment-id found at +# https://console.us-phoenix-1.oraclecloud.com/a/identity/compartments +# Required for Oracle +OCI_COMPARTMENT_ID = None + +################################################################## +# USER SETTINGS OVERRIDES +################################################################## +# Bring in any user-file defined settings +try: + from tests.integration_tests.user_settings import * # noqa +except ImportError: + pass + +################################################################## +# ENVIRONMENT SETTINGS OVERRIDES +################################################################## +# Any of the settings in this file can be overridden with an +# environment variable of the same name prepended with CLOUD_INIT_ +# E.g., CLOUD_INIT_PLATFORM +# Perhaps a bit too hacky, but it works :) +current_settings = [var for var in locals() if var.isupper()] +for setting in current_settings: + globals()[setting] = os.getenv( + 'CLOUD_INIT_{}'.format(setting), globals()[setting] + ) diff --git a/tests/integration_tests/platforms.py b/tests/integration_tests/platforms.py new file mode 100644 index 00000000..b42414b9 --- /dev/null +++ b/tests/integration_tests/platforms.py @@ -0,0 +1,235 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+from abc import ABC, abstractmethod +import logging +import os +from tempfile import NamedTemporaryFile + +from pycloudlib import EC2, GCE, Azure, OCI, LXD +from pycloudlib.cloud import BaseCloud +from pycloudlib.instance import BaseInstance + +import cloudinit +from cloudinit.subp import subp +from tests.integration_tests import integration_settings + +try: + from typing import Callable, Optional +except ImportError: + pass + + +log = logging.getLogger('integration_testing') + + +class IntegrationClient(ABC): + client = None # type: Optional[BaseCloud] + instance = None # type: Optional[BaseInstance] + datasource = None # type: Optional[str] + use_sudo = True + current_image = None + + def __init__(self, user_data=None, instance_type=None, wait=True, + settings=integration_settings, launch_kwargs=None): + self.user_data = user_data + self.instance_type = settings.INSTANCE_TYPE if \ + instance_type is None else instance_type + self.wait = wait + self.settings = settings + self.launch_kwargs = launch_kwargs if launch_kwargs else {} + self.client = self._get_client() + + @abstractmethod + def _get_client(self): + raise NotImplementedError + + def _get_image(self): + if self.current_image: + return self.current_image + image_id = self.settings.OS_IMAGE + try: + image_id = self.client.released_image(self.settings.OS_IMAGE) + except (ValueError, IndexError): + pass + return image_id + + def launch(self): + if self.settings.EXISTING_INSTANCE_ID: + log.info( + 'Not launching instance due to EXISTING_INSTANCE_ID. ' + 'Instance id: %s', self.settings.EXISTING_INSTANCE_ID) + self.instance = self.client.get_instance( + self.settings.EXISTING_INSTANCE_ID + ) + return + image_id = self._get_image() + launch_args = { + 'image_id': image_id, + 'user_data': self.user_data, + 'wait': self.wait, + } + if self.instance_type: + launch_args['instance_type'] = self.instance_type + launch_args.update(self.launch_kwargs) + self.instance = self.client.launch(**launch_args) + log.info('Launched instance: %s', self.instance) + + def destroy(self): + self.instance.delete() + + def execute(self, command): + return self.instance.execute(command) + + def pull_file(self, remote_file, local_file): + self.instance.pull_file(remote_file, local_file) + + def push_file(self, local_path, remote_path): + self.instance.push_file(local_path, remote_path) + + def read_from_file(self, remote_path) -> str: + tmp_file = NamedTemporaryFile('r') + self.pull_file(remote_path, tmp_file.name) + with tmp_file as f: + contents = f.read() + return contents + + def write_to_file(self, remote_path, contents: str): + # Writes file locally and then pushes it rather + # than writing the file directly on the instance + with NamedTemporaryFile('w', delete=False) as tmp_file: + tmp_file.write(contents) + + try: + self.push_file(tmp_file.name, remote_path) + finally: + os.unlink(tmp_file.name) + + def snapshot(self): + return self.client.snapshot(self.instance, clean=True) + + def _install_new_cloud_init(self, remote_script): + self.execute(remote_script) + version = self.execute('cloud-init -v').split()[-1] + log.info('Installed cloud-init version: %s', version) + self.instance.clean() + image_id = self.snapshot() + log.info('Created new image: %s', image_id) + IntegrationClient.current_image = image_id + + def install_proposed_image(self): + log.info('Installing proposed image') + remote_script = ( + '{sudo} echo deb "http://archive.ubuntu.com/ubuntu ' + '$(lsb_release -sc)-proposed main" | ' + '{sudo} tee /etc/apt/sources.list.d/proposed.list\n' 
+ '{sudo} apt-get update -q\n' + '{sudo} apt-get install -qy cloud-init' + ).format(sudo='sudo' if self.use_sudo else '') + self._install_new_cloud_init(remote_script) + + def install_ppa(self, repo): + log.info('Installing PPA') + remote_script = ( + '{sudo} add-apt-repository {repo} -y && ' + '{sudo} apt-get update -q && ' + '{sudo} apt-get install -qy cloud-init' + ).format(sudo='sudo' if self.use_sudo else '', repo=repo) + self._install_new_cloud_init(remote_script) + + def install_deb(self): + log.info('Installing deb package') + deb_path = integration_settings.CLOUD_INIT_SOURCE + deb_name = os.path.basename(deb_path) + remote_path = '/var/tmp/{}'.format(deb_name) + self.push_file( + local_path=integration_settings.CLOUD_INIT_SOURCE, + remote_path=remote_path) + remote_script = '{sudo} dpkg -i {path}'.format( + sudo='sudo' if self.use_sudo else '', path=remote_path) + self._install_new_cloud_init(remote_script) + + def __enter__(self): + self.launch() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.settings.KEEP_INSTANCE: + self.destroy() + + +class Ec2Client(IntegrationClient): + datasource = 'ec2' + + def _get_client(self): + return EC2(tag='ec2-integration-test') + + +class GceClient(IntegrationClient): + datasource = 'gce' + + def _get_client(self): + return GCE( + tag='gce-integration-test', + project=self.settings.GCE_PROJECT, + region=self.settings.GCE_REGION, + zone=self.settings.GCE_ZONE, + ) + + +class AzureClient(IntegrationClient): + datasource = 'azure' + + def _get_client(self): + return Azure(tag='azure-integration-test') + + +class OciClient(IntegrationClient): + datasource = 'oci' + + def _get_client(self): + return OCI( + tag='oci-integration-test', + compartment_id=self.settings.OCI_COMPARTMENT_ID + ) + + +class LxdContainerClient(IntegrationClient): + datasource = 'lxd_container' + use_sudo = False + + def _get_client(self): + return LXD(tag='lxd-integration-test') + + def _mount_source(self): + command = ( + 'lxc config device add {name} host-cloud-init disk ' + 'source={cloudinit_path} ' + 'path=/usr/lib/python3/dist-packages/cloudinit' + ).format( + name=self.instance.name, cloudinit_path=cloudinit.__path__[0]) + subp(command.split()) + + def launch(self): + super().launch() + if self.settings.CLOUD_INIT_SOURCE == 'IN_PLACE': + self._mount_source() + + +client_name_to_class = { + 'ec2': Ec2Client, + 'gce': GceClient, + # 'azure': AzureClient, # Not supported yet + 'oci': OciClient, + 'lxd_container': LxdContainerClient +} + +try: + dynamic_client = client_name_to_class[ + integration_settings.PLATFORM + ] # type: Callable[..., IntegrationClient] +except KeyError: + raise ValueError( + "{} is an invalid PLATFORM specified in settings. 
" + "Must be one of {}".format( + integration_settings.PLATFORM, list(client_name_to_class.keys()) + ) + ) diff --git a/tox.ini b/tox.ini index a92c63e0..3bc83a2a 100644 --- a/tox.ini +++ b/tox.ini @@ -139,8 +139,15 @@ deps = [pytest] # TODO: s/--strict/--strict-markers/ once xenial support is dropped +testpaths = cloudinit tests/unittests addopts = --strict markers = allow_subp_for: allow subp usage for the given commands (disable_subp_usage) allow_all_subp: allow all subp usage (disable_subp_usage) ds_sys_cfg: a sys_cfg dict to be used by datasource fixtures + ec2: test will only run on EC2 platform + gce: test will only run on GCE platform + azure: test will only run on Azure platform + oci: test will only run on OCI platform + lxd_container: test will only run in LXD container + user_data: the user data to be passed to the test instance -- cgit v1.2.3 From 5a7f6818083118b45828fa0b334309449881f80a Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Mon, 19 Oct 2020 22:59:16 +0200 Subject: bddeb: new --packaging-branch argument to pull packaging from branch (#576) bddeb builds a .deb package using the template packaging files in packages/debian/. The new --packaging-branch flag allows to specify a git branch where to pull the packaging (i.e. the debian/ directory) from. This is useful to build a .deb package from master with the very same packaging which is used for the uploads. --- cloudinit/subp.py | 6 ++-- doc/rtd/topics/cloud_tests.rst | 13 ++++--- packages/bddeb | 80 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 91 insertions(+), 8 deletions(-) (limited to 'doc/rtd') diff --git a/cloudinit/subp.py b/cloudinit/subp.py index 3e4efa42..024e1a98 100644 --- a/cloudinit/subp.py +++ b/cloudinit/subp.py @@ -144,7 +144,7 @@ class ProcessExecutionError(IOError): def subp(args, data=None, rcs=None, env=None, capture=True, combine_capture=False, shell=False, logstring=False, decode="replace", target=None, update_env=None, - status_cb=None): + status_cb=None, cwd=None): """Run a subprocess. :param args: command to run in a list. [cmd, arg1, arg2...] @@ -181,6 +181,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, :param status_cb: call this fuction with a single string argument before starting and after finishing. + :param cwd: + change the working directory to cwd before executing the command. :return if not capturing, return is (None, None) @@ -254,7 +256,7 @@ def subp(args, data=None, rcs=None, env=None, capture=True, try: sp = subprocess.Popen(bytes_args, stdout=stdout, stderr=stderr, stdin=stdin, - env=env, shell=shell) + env=env, shell=shell, cwd=cwd) (out, err) = sp.communicate(data) except OSError as e: if status_cb: diff --git a/doc/rtd/topics/cloud_tests.rst b/doc/rtd/topics/cloud_tests.rst index e4e893d2..0fbb1301 100644 --- a/doc/rtd/topics/cloud_tests.rst +++ b/doc/rtd/topics/cloud_tests.rst @@ -151,17 +151,20 @@ cloud-init located in a different directory, use the option ``--cloud-init Bddeb ----- -The ``bddeb`` command can be used to generate a deb file. This is used by -the tree_run and tree_collect commands to build a deb of the current -working tree. It can also be used a user to generate a deb for use in other -situations and avoid needing to have all the build and test dependencies -installed locally. +The ``bddeb`` command can be used to generate a deb file. This is used by the +tree_run and tree_collect commands to build a deb of the current working tree +using the packaging template contained in the ``packages/debian/`` directory. 
+It can also be used to generate a deb for use in other situations and avoid +needing to have all the build and test dependencies installed locally. * ``--bddeb-args``: arguments to pass through to bddeb * ``--build-os``: distribution to use as build system (default is xenial) * ``--build-platform``: platform to use for build system (default is lxd) * ``--cloud-init``: path to base of cloud-init tree (default is '.') * ``--deb``: path to write output deb to (default is '.') +* ``--packaging-branch``: import the ``debian/`` packaging directory + from the specified branch (default: ``ubuntu/devel``) instead of using + the packaging template. Setup Image ----------- diff --git a/packages/bddeb b/packages/bddeb index b0f219b6..a3fb8848 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -5,6 +5,7 @@ import csv import json import os import shutil +import subprocess import sys UNRELEASED = "UNRELEASED" @@ -99,6 +100,36 @@ def write_debian_folder(root, templ_data, cloud_util_deps): params={'build_depends': ','.join(requires)}) +def write_debian_folder_from_branch(root, templ_data, branch): + """Import a debian package directory from a branch.""" + print("Importing debian/ from branch %s to %s" % (branch, root)) + + p_dumpdeb = subprocess.Popen( + ["git", "archive", branch, "debian"], stdout=subprocess.PIPE + ) + subprocess.check_call( + ["tar", "-v", "-C", root, "-x"], + stdin=p_dumpdeb.stdout + ) + + print("Adding new entry to debian/changelog") + full_deb_version = ( + templ_data["version_long"] + "-1~bddeb" + templ_data["release_suffix"] + ) + subp.subp( + [ + "dch", + "--distribution", + templ_data["debian_release"], + "--newversion", + full_deb_version, + "--controlmaint", + "Snapshot build.", + ], + cwd=root + ) + + def read_version(): return json.loads(run_helper('read-version', ['--json'])) @@ -140,6 +171,15 @@ def get_parser(): parser.add_argument("--signuser", default=False, action='store', help="user to sign, see man dpkg-genchanges") + + parser.add_argument("--packaging-branch", nargs="?", metavar="BRANCH", + const="ubuntu/devel", type=str, + help=( + "Import packaging from %(metavar)s instead of" + " using the packages/debian/* templates" + " (default: %(const)s)" + )) + return parser @@ -147,6 +187,37 @@ def main(): parser = get_parser() args = parser.parse_args() + if args.packaging_branch: + try: + subp.subp( + [ + "git", + "show-ref", + "--quiet", + "--verify", + "refs/heads/" + args.packaging_branch, + ] + ) + except subp.ProcessExecutionError: + print("Couldn't find branch '%s'." % args.packaging_branch) + print("You may need to checkout the branch from the git remote.") + return 1 + try: + subp.subp( + [ + "git", + "cat-file", + "-e", + args.packaging_branch + ":debian/control", + ] + ) + except subp.ProcessExecutionError: + print( + "Couldn't find debian/control in branch '%s'." + " Is it a packaging branch?" 
% args.packaging_branch
+            )
+            return 1
+
     if not args.sign:
         args.debuild_args.extend(['-us', '-uc'])
 
@@ -198,7 +269,14 @@ def main():
     xdir = util.abs_join(tdir, "cloud-init-%s" % ver_data['version_long'])
     templ_data.update(ver_data)
 
-    write_debian_folder(xdir, templ_data, cloud_util_deps=args.cloud_utils)
+    if args.packaging_branch:
+        write_debian_folder_from_branch(
+            xdir, templ_data, args.packaging_branch
+        )
+    else:
+        write_debian_folder(
+            xdir, templ_data, cloud_util_deps=args.cloud_utils
+        )
 
     print("Running 'debuild %s' in %r" % (' '.join(args.debuild_args), xdir))
-- 
cgit v1.2.3 

From 72d85ff98f4185db10af980776b1ba46fa340920 Mon Sep 17 00:00:00 2001
From: Joshua Powers 
Date: Thu, 22 Oct 2020 11:26:08 -0700
Subject: docs: Add how to use cloud-localds to boot qemu (#617)

* docs: Add how to use cloud-localds to boot qemu

There is a complete lack of documentation on using cloud-localds with
cloud-init to boot an image locally.

Drive-by: added some more whitepapers, blogs, and videos

* fix line length

* * add where cloud-localds comes from
  * add more specific example with metadata and network config
  * Add link to cloud-utils package

---
 doc/rtd/topics/faq.rst | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)

(limited to 'doc/rtd')

diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index aa1be142..944cc27f 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -226,12 +226,65 @@ custom network config.
 .. _Instance Configuration: https://linuxcontainers.org/lxd/docs/master/instances
 .. _Custom Network Configuration: https://linuxcontainers.org/lxd/docs/master/cloud-init
 
+cloud-localds
+-------------
+
+The `cloud-localds` command from the `cloud-utils`_ package generates a disk
+with user-supplied data. The NoCloud datasource allows users to provide their
+own user data, metadata, or network configuration directly to an instance
+without running a network service. This is helpful for launching local cloud
+images with QEMU, for example.
+
+The following is an example of creating the local disk using the cloud-localds
+command:
+
+.. code-block:: shell-session
+
+    $ cat >user-data <<EOF
+    #cloud-config
+    password: password
+    chpasswd:
+      expire: False
+    ssh_pwauth: True
+    EOF
+
+    $ cat >meta-data <<EOF
+    instance-id: test-localds
+    local-hostname: cloud-test
+    EOF
+
+    $ cloud-localds seed.img user-data meta-data
+
+The generated seed.img can then be attached to a cloud image and booted
+locally with QEMU:
+
+.. code-block:: shell-session
+
+    $ qemu-system-x86_64 -m 1024 -net nic -net user \
+        -drive file=focal-server-cloudimg-amd64.img,if=virtio,format=qcow2 \
+        -drive file=seed.img,if=virtio,format=raw
+
+.. _cloud-utils: https://github.com/canonical/cloud-utils/
+
-- 
cgit v1.2.3 

Date: Tue, 27 Oct 2020 20:54:30 +0000
Subject: Update network config docs to clarify MAC address quoting (#623)

Also update MAC addresses used in testcases to remove quotes where not
required and add single quotes where quotes are required.
---
 doc/rtd/topics/network-config-format-v1.rst | 36 ++++++++++++++++++--------
 doc/rtd/topics/network-config-format-v2.rst | 13 ++++++++--
 tests/unittests/test_net.py                 | 40 ++++++++++++++---------------
 3 files changed, 57 insertions(+), 32 deletions(-)

(limited to 'doc/rtd')

diff --git a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst
index dfbde514..92e81897 100644
--- a/doc/rtd/topics/network-config-format-v1.rst
+++ b/doc/rtd/topics/network-config-format-v1.rst
@@ -64,6 +64,14 @@ structure.
 The MAC Address is a device unique identifier that most Ethernet-based network
 devices possess. Specifying a MAC Address is optional.
 
+.. note::
+
+  MAC addresses must be strings. As MAC addresses which consist of only the
+  digits 0-9 (i.e. no hex a-f) can be interpreted as a base 60 integer per
+  the `YAML 1.1 spec`_ it is best practice to quote all MAC addresses to ensure
+  they are parsed as strings regardless of value.
+
+.. _YAML 1.1 spec: https://yaml.org/type/int.html
 
 .. note::
 
@@ -91,7 +99,7 @@ packet- or frame-based network. Specifying ``mtu`` is optional.
# Simple network adapter - type: physical name: interface0 - mac_address: 00:11:22:33:44:55 + mac_address: '00:11:22:33:44:55' # Second nic with Jumbo frames - type: physical name: jumbo0 @@ -124,6 +132,14 @@ bond interfaces. Specifying a MAC Address is optional. If ``mac_address`` is not present, then the bond will use one of the MAC Address values from one of the bond interfaces. +.. note:: + + MAC addresses must be strings. As MAC addresses which consist of only the + digits 0-9 (i.e. no hex a-f) can be interpreted as a base 60 integer per + the `YAML 1.1 spec`_ it is best practice to quote all MAC addresses to ensure + they are parsed as strings regardless of value. + +.. _YAML 1.1 spec: https://yaml.org/type/int.html **bond_interfaces**: ** @@ -194,7 +210,7 @@ Valid ``params`` keys are: # Simple network adapter - type: physical name: interface0 - mac_address: 00:11:22:33:44:55 + mac_address: '00:11:22:33:44:55' # 10G pair - type: physical name: gbe0 @@ -246,7 +262,7 @@ Valid keys are: # Simple network adapter - type: physical name: interface0 - mac_address: 00:11:22:33:44:55 + mac_address: '00:11:22:33:44:55' # Second nic with Jumbo frames - type: physical name: jumbo0 @@ -303,7 +319,7 @@ packet- or frame-based network. Specifying ``mtu`` is optional. # Physical interfaces. - type: physical name: eth0 - mac_address: "c0:d6:9f:2c:e8:80" + mac_address: c0:d6:9f:2c:e8:80 # VLAN interface. - type: vlan name: eth0.101 @@ -327,7 +343,7 @@ the following keys: config: - type: physical name: interface0 - mac_address: 00:11:22:33:44:55 + mac_address: '00:11:22:33:44:55' subnets: - type: static address: 192.168.23.14/27 @@ -358,7 +374,7 @@ has the following keys: config: - type: physical name: interface0 - mac_address: 00:11:22:33:44:55 + mac_address: '00:11:22:33:44:55' subnets: - type: static address: 192.168.23.14/24 @@ -410,7 +426,7 @@ the subnet dictionary. config: - type: physical name: interface0 - mac_address: 00:11:22:33:44:55 + mac_address: '00:11:22:33:44:55' subnets: - type: dhcp @@ -422,7 +438,7 @@ the subnet dictionary. config: - type: physical name: interface0 - mac_address: 00:11:22:33:44:55 + mac_address: '00:11:22:33:44:55' subnets: - type: static address: 192.168.23.14/27 @@ -443,7 +459,7 @@ using the static subnet configuration. config: - type: physical name: interface0 - mac_address: 00:11:22:33:44:55 + mac_address: '00:11:22:33:44:55' subnets: - type: dhcp - type: static @@ -462,7 +478,7 @@ using the static subnet configuration. config: - type: physical name: interface0 - mac_address: 00:11:22:33:44:55 + mac_address: '00:11:22:33:44:55' subnets: - type: dhcp - type: static diff --git a/doc/rtd/topics/network-config-format-v2.rst b/doc/rtd/topics/network-config-format-v2.rst index c93e29be..aa17bef5 100644 --- a/doc/rtd/topics/network-config-format-v2.rst +++ b/doc/rtd/topics/network-config-format-v2.rst @@ -94,7 +94,16 @@ NetworkManager does not. **macaddress**: *<(scalar)>* -Device's MAC address in the form "XX:XX:XX:XX:XX:XX". Globs are not allowed. +Device's MAC address in the form XX:XX:XX:XX:XX:XX. Globs are not allowed. + +.. note:: + + MAC addresses must be strings. As MAC addresses which consist of only the + digits 0-9 (i.e. no hex a-f) can be interpreted as a base 60 integer per + the `YAML 1.1 spec`_ it is best practice to quote all MAC addresses to ensure + they are parsed as strings regardless of value. + +.. 
_YAML 1.1 spec: https://yaml.org/type/int.html **driver**: *<(scalar)>* @@ -458,7 +467,7 @@ This is a complex example which shows most available features: :: # opaque ID for physical interfaces, only referred to by other stanzas id0: match: - macaddress: 00:11:22:33:44:55 + macaddress: '00:11:22:33:44:55' wakeonlan: true dhcp4: true addresses: diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 207e47bb..642e60cc 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -910,7 +910,7 @@ NETWORK_CONFIGS = { # Physical interfaces. - type: physical name: eth99 - mac_address: "c0:d6:9f:2c:e8:80" + mac_address: c0:d6:9f:2c:e8:80 subnets: - type: dhcp4 - type: static @@ -926,7 +926,7 @@ NETWORK_CONFIGS = { metric: 10000 - type: physical name: eth1 - mac_address: "cf:d6:af:48:e8:80" + mac_address: cf:d6:af:48:e8:80 - type: nameserver address: - 1.2.3.4 @@ -1743,26 +1743,26 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true # Physical interfaces. - type: physical name: eth0 - mac_address: "c0:d6:9f:2c:e8:80" + mac_address: c0:d6:9f:2c:e8:80 - type: physical name: eth1 - mac_address: "aa:d6:9f:2c:e8:80" + mac_address: aa:d6:9f:2c:e8:80 - type: physical name: eth2 - mac_address: "c0:bb:9f:2c:e8:80" + mac_address: c0:bb:9f:2c:e8:80 - type: physical name: eth3 - mac_address: "66:bb:9f:2c:e8:80" + mac_address: 66:bb:9f:2c:e8:80 - type: physical name: eth4 - mac_address: "98:bb:9f:2c:e8:80" + mac_address: 98:bb:9f:2c:e8:80 # specify how ifupdown should treat iface # control is one of ['auto', 'hotplug', 'manual'] # with manual meaning ifup/ifdown should not affect the iface # useful for things like iscsi root + dhcp - type: physical name: eth5 - mac_address: "98:bb:9f:2c:e8:8a" + mac_address: 98:bb:9f:2c:e8:8a subnets: - type: dhcp control: manual @@ -1793,7 +1793,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true name: bond0 # if 'mac_address' is omitted, the MAC is taken from # the first slave. 
- mac_address: "aa:bb:cc:dd:ee:ff" + mac_address: aa:bb:cc:dd:ee:ff bond_interfaces: - eth1 - eth2 @@ -1888,13 +1888,13 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true config: - type: physical name: bond0s0 - mac_address: "aa:bb:cc:dd:e8:00" + mac_address: aa:bb:cc:dd:e8:00 - type: physical name: bond0s1 - mac_address: "aa:bb:cc:dd:e8:01" + mac_address: aa:bb:cc:dd:e8:01 - type: bond name: bond0 - mac_address: "aa:bb:cc:dd:e8:ff" + mac_address: aa:bb:cc:dd:e8:ff mtu: 9000 bond_interfaces: - bond0s0 @@ -2042,12 +2042,12 @@ iface bond0 inet6 static eth0: match: driver: "virtio_net" - macaddress: "aa:bb:cc:dd:e8:00" + macaddress: aa:bb:cc:dd:e8:00 vf0: set-name: vf0 match: driver: "e1000" - macaddress: "aa:bb:cc:dd:e8:01" + macaddress: aa:bb:cc:dd:e8:01 bonds: bond0: addresses: @@ -2221,7 +2221,7 @@ iface bond0 inet6 static config: - type: physical name: en0 - mac_address: "aa:bb:cc:dd:e8:00" + mac_address: aa:bb:cc:dd:e8:00 - type: vlan mtu: 2222 name: en0.99 @@ -2294,13 +2294,13 @@ iface bond0 inet6 static config: - type: physical name: eth0 - mac_address: "52:54:00:12:34:00" + mac_address: '52:54:00:12:34:00' subnets: - type: static address: 2001:1::100/96 - type: physical name: eth1 - mac_address: "52:54:00:12:34:01" + mac_address: '52:54:00:12:34:01' subnets: - type: static address: 2001:1::101/96 @@ -2385,7 +2385,7 @@ iface bond0 inet6 static config: - type: physical name: eth0 - mac_address: "52:54:00:12:34:00" + mac_address: '52:54:00:12:34:00' subnets: - type: static address: 192.168.1.2/24 @@ -2393,12 +2393,12 @@ iface bond0 inet6 static - type: physical name: eth1 mtu: 1480 - mac_address: "52:54:00:12:34:aa" + mac_address: 52:54:00:12:34:aa subnets: - type: manual - type: physical name: eth2 - mac_address: "52:54:00:12:34:ff" + mac_address: 52:54:00:12:34:ff subnets: - type: manual control: manual -- cgit v1.2.3 From 8642e8bce3530d2deb6b02895c08edd291eea48f Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 2 Nov 2020 07:20:30 -0700 Subject: doc: add example query commands to debug Jinja templates (#645) --- doc/rtd/topics/instancedata.rst | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'doc/rtd') diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 255245a4..1850982c 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -592,6 +592,22 @@ see only redacted values. % cloud-init query --format 'cloud: {{ v1.cloud_name }} myregion: {{ % v1.region }}' + # Locally test that your template userdata provided to the vm was rendered as + # intended. + % cloud-init query --format "$(sudo cloud-init query userdata)" + + # The --format command renders jinja templates, this can also be used + # to develop and test jinja template constructs + % cat > test-templating.yaml < Date: Tue, 10 Nov 2020 11:49:27 -0500 Subject: faq.rst: add warning to answer that suggests running `clean` (#661) --- doc/rtd/topics/faq.rst | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'doc/rtd') diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst index 944cc27f..d08914b5 100644 --- a/doc/rtd/topics/faq.rst +++ b/doc/rtd/topics/faq.rst @@ -121,6 +121,12 @@ cloud-init: $ sudo cloud-init init --local $ sudo cloud-init init +.. warning:: + + These commands will re-run cloud-init as if this were first boot of a + system: this will, at the very least, cycle SSH host keys and may do + substantially more. Do not run these commands on production systems. + How can I debug my user data? 
=============================
-- cgit v1.2.3


From f680114446a5a20ce88f3d10d966811a774c8e8f Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Wed, 18 Nov 2020 07:23:44 -0700
Subject: cli: add --system param to allow validating system user-data on a machine (#575)

Allow the root user to validate the userdata provided to the launched
machine using `cloud-init devel schema --system`.
---
 cloudinit/config/schema.py                  |  41 ++++++++---
 doc/rtd/topics/faq.rst                      |   6 +-
 tests/unittests/test_cli.py                 |   2 +-
 tests/unittests/test_handler/test_schema.py | 109 ++++++++++++++++++++--------
 4 files changed, 114 insertions(+), 44 deletions(-)

(limited to 'doc/rtd')

diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index 8a966aee..456bab2c 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -1,6 +1,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 """schema.py: Set of module functions for processing cloud-config schema."""
 
+from cloudinit.cmd.devel import read_cfg_paths
 from cloudinit import importer
 from cloudinit.util import find_modules, load_file
 
@@ -173,7 +174,8 @@ def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
 def validate_cloudconfig_file(config_path, schema, annotate=False):
     """Validate cloudconfig file adheres to a specific jsonschema.
 
-    @param config_path: Path to the yaml cloud-config file to parse.
+    @param config_path: Path to the yaml cloud-config file to parse, or None
+        to default to system userdata from Paths object.
     @param schema: Dict describing a valid jsonschema to validate against.
     @param annotate: Boolean set True to print original config file with error
         annotations on the offending lines.
@@ -181,9 +183,24 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
     @raises SchemaValidationError containing any of schema_errors encountered.
     @raises RuntimeError when config_path does not exist.
     """
-    if not os.path.exists(config_path):
-        raise RuntimeError('Configfile {0} does not exist'.format(config_path))
-    content = load_file(config_path, decode=False)
+    if config_path is None:
+        # Use system's raw userdata path
+        if os.getuid() != 0:
+            raise RuntimeError(
+                "Unable to read system userdata as non-root user."
+                " Try using sudo"
+            )
+        paths = read_cfg_paths()
+        user_data_file = paths.get_ipath_cur("userdata_raw")
+        content = load_file(user_data_file, decode=False)
+    else:
+        if not os.path.exists(config_path):
+            raise RuntimeError(
+                'Configfile {0} does not exist'.format(
+                    config_path
+                )
+            )
+        content = load_file(config_path, decode=False)
     if not content.startswith(CLOUD_CONFIG_HEADER):
         errors = (
             ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
@@ -425,6 +442,8 @@ def get_parser(parser=None):
             description='Validate cloud-config files or document schema')
     parser.add_argument('-c', '--config-file',
                         help='Path of the cloud-config yaml file to validate')
+    parser.add_argument('--system', action='store_true', default=False,
+                        help='Validate the system cloud-config userdata')
     parser.add_argument('-d', '--docs', nargs='+',
                         help=('Print schema module docs. Choices: all or'
                               ' space-delimited cc_names.'))
@@ -435,11 +454,11 @@ def get_parser(parser=None):
 
 def handle_schema_args(name, args):
     """Handle provided schema args and perform the appropriate actions."""
-    exclusive_args = [args.config_file, args.docs]
-    if not any(exclusive_args) or all(exclusive_args):
-        error('Expected either --config-file argument or --docs')
+    exclusive_args = [args.config_file, args.docs, args.system]
+    if len([arg for arg in exclusive_args if arg]) != 1:
+        error('Expected one of --config-file, --system or --docs arguments')
     full_schema = get_schema()
-    if args.config_file:
+    if args.config_file or args.system:
         try:
             validate_cloudconfig_file(
                 args.config_file, full_schema, args.annotate)
@@ -449,7 +468,11 @@ def handle_schema_args(name, args):
         except RuntimeError as e:
             error(str(e))
         else:
-            print("Valid cloud-config file {0}".format(args.config_file))
+            if args.config_file is None:
+                cfg_name = "system userdata"
+            else:
+                cfg_name = args.config_file
+            print("Valid cloud-config:", cfg_name)
     elif args.docs:
         schema_ids = [subschema['id'] for subschema in full_schema['allOf']]
         schema_ids += ['all']
diff --git a/doc/rtd/topics/faq.rst b/doc/rtd/topics/faq.rst
index d08914b5..27fabf15 100644
--- a/doc/rtd/topics/faq.rst
+++ b/doc/rtd/topics/faq.rst
@@ -141,12 +141,12 @@ that can validate your user data offline.
 
 .. _validate-yaml.py: https://github.com/canonical/cloud-init/blob/master/tools/validate-yaml.py
 
-Another option is to run the following on an instance when debugging:
+Another option is to run the following on an instance to debug userdata
+provided to the system:
 
 .. code-block:: shell-session
 
-    $ sudo cloud-init query userdata > user-data.yaml
-    $ cloud-init devel schema -c user-data.yaml --annotate
+    $ cloud-init devel schema --system --annotate
 
 As launching instances in the cloud can cost money and take a bit longer,
 sometimes it is easier to launch instances locally using Multipass or LXD:
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index dcf0fe5a..74f85959 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -214,7 +214,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase):
         self.assertEqual(1, exit_code)
         # Known whitebox output from schema subcommand
         self.assertEqual(
-            'Expected either --config-file argument or --docs\n',
+            'Expected one of --config-file, --system or --docs arguments\n',
             self.stderr.getvalue())
 
     def test_wb_devel_schema_subcommand_doc_content(self):
diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py
index 44292571..15aa77bb 100644
--- a/tests/unittests/test_handler/test_schema.py
+++ b/tests/unittests/test_handler/test_schema.py
@@ -9,9 +9,9 @@ from cloudinit.util import write_file
 from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
 
 from copy import copy
+import itertools
 import os
 import pytest
-from io import StringIO
 from pathlib import Path
 from textwrap import dedent
 from yaml import safe_load
@@ -400,50 +400,97 @@ class AnnotatedCloudconfigFileTest(CiTestCase):
             annotated_cloudconfig_file(parsed_config, content, schema_errors))
 
-class MainTest(CiTestCase):
+class TestMain:
 
-    def test_main_missing_args(self):
+    exclusive_combinations = itertools.combinations(
+        ["--system", "--docs all", "--config-file something"], 2
+    )
+
+    @pytest.mark.parametrize("params", exclusive_combinations)
+    def test_main_exclusive_args(self, params, capsys):
+        """Main exits non-zero and errors on mutually exclusive args."""
+        params = list(itertools.chain(*[a.split() for a in params]))
+        with mock.patch('sys.argv', ['mycmd'] + params):
+            with pytest.raises(SystemExit) as context_manager:
+                main()
+        assert 1 == context_manager.value.code
+
+        _out, err = capsys.readouterr()
+        expected = (
+            'Expected one of --config-file, --system or --docs arguments\n'
+        )
+        assert expected == err
+
+    def test_main_missing_args(self, capsys):
         """Main exits non-zero and reports an error on missing parameters."""
         with mock.patch('sys.argv', ['mycmd']):
-            with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
-                with self.assertRaises(SystemExit) as context_manager:
-                    main()
-        self.assertEqual(1, context_manager.exception.code)
-        self.assertEqual(
-            'Expected either --config-file argument or --docs\n',
-            m_stderr.getvalue())
+            with pytest.raises(SystemExit) as context_manager:
+                main()
+        assert 1 == context_manager.value.code
+
+        _out, err = capsys.readouterr()
+        expected = (
+            'Expected one of --config-file, --system or --docs arguments\n'
+        )
+        assert expected == err
 
-    def test_main_absent_config_file(self):
+    def test_main_absent_config_file(self, capsys):
         """Main exits non-zero when config file is absent."""
         myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE']
         with mock.patch('sys.argv', myargs):
-            with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr:
-                with self.assertRaises(SystemExit) as context_manager:
-                    main()
-        self.assertEqual(1, context_manager.exception.code)
-        self.assertEqual(
-            'Configfile NOT_A_FILE does not exist\n',
-            m_stderr.getvalue())
+            with pytest.raises(SystemExit) as context_manager:
+                main()
+        assert 1 == context_manager.value.code
+        _out, err = capsys.readouterr()
+        assert 'Configfile NOT_A_FILE does not exist\n' == err
 
-    def test_main_prints_docs(self):
+    def test_main_prints_docs(self, capsys):
         """When --docs parameter is provided, main generates documentation."""
         myargs = ['mycmd', '--docs', 'all']
         with mock.patch('sys.argv', myargs):
-            with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
-                self.assertEqual(0, main(), 'Expected 0 exit code')
-        self.assertIn('\nNTP\n---\n', m_stdout.getvalue())
-        self.assertIn('\nRuncmd\n------\n', m_stdout.getvalue())
+            assert 0 == main(), 'Expected 0 exit code'
+        out, _err = capsys.readouterr()
+        assert '\nNTP\n---\n' in out
+        assert '\nRuncmd\n------\n' in out
 
-    def test_main_validates_config_file(self):
+    def test_main_validates_config_file(self, tmpdir, capsys):
         """When --config-file parameter is provided, main validates schema."""
-        myyaml = self.tmp_path('my.yaml')
-        myargs = ['mycmd', '--config-file', myyaml]
-        write_file(myyaml, b'#cloud-config\nntp:')  # shortest ntp schema
+        myyaml = tmpdir.join('my.yaml')
+        myargs = ['mycmd', '--config-file', myyaml.strpath]
+        myyaml.write(b'#cloud-config\nntp:')  # shortest ntp schema
         with mock.patch('sys.argv', myargs):
-            with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout:
-                self.assertEqual(0, main(), 'Expected 0 exit code')
-        self.assertIn(
-            'Valid cloud-config file {0}'.format(myyaml), m_stdout.getvalue())
+            assert 0 == main(), 'Expected 0 exit code'
+        out, _err = capsys.readouterr()
+        assert 'Valid cloud-config: {0}\n'.format(myyaml) == out
+
+    @mock.patch('cloudinit.config.schema.read_cfg_paths')
+    @mock.patch('cloudinit.config.schema.os.getuid', return_value=0)
+    def test_main_validates_system_userdata(
+        self, m_getuid, m_read_cfg_paths, capsys, paths
+    ):
+        """When --system is provided, main validates system userdata."""
+        m_read_cfg_paths.return_value = paths
+        ud_file = paths.get_ipath_cur("userdata_raw")
+        write_file(ud_file, b'#cloud-config\nntp:')
+        myargs = ['mycmd', '--system']
+        with mock.patch('sys.argv', myargs):
+            assert 0 == main(), 'Expected 0 exit code'
+        out, _err = capsys.readouterr()
+        assert 'Valid cloud-config: system userdata\n' == out
+
+    @mock.patch('cloudinit.config.schema.os.getuid', return_value=1000)
+    def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths):
+        """Non-root user can't use --system param."""
+        myargs = ['mycmd', '--system']
+        with mock.patch('sys.argv', myargs):
+            with pytest.raises(SystemExit) as context_manager:
+                main()
+        assert 1 == context_manager.value.code
+        _out, err = capsys.readouterr()
+        expected = (
+            'Unable to read system userdata as non-root user. Try using sudo\n'
+        )
+        assert expected == err
 
 
 class CloudTestsIntegrationTest(CiTestCase):
-- cgit v1.2.3
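
To make the `--system` path added in the patch above concrete, the sketch
below drives the same validation from Python instead of the CLI. It is an
illustration only, not part of the patch series: it assumes cloud-init with
this change installed, running as root on a booted instance whose userdata
has already been written, since passing config_path=None makes
validate_cloudconfig_file read the system userdata via read_cfg_paths() and
raise RuntimeError for non-root users.

    # Minimal sketch: programmatic equivalent of
    # `sudo cloud-init devel schema --system`.
    # Assumes root on a cloud-init-initialized instance; config_path=None
    # tells validate_cloudconfig_file to read the instance's own
    # userdata_raw path instead of a file argument.
    from cloudinit.config.schema import (
        SchemaValidationError,
        get_schema,
        validate_cloudconfig_file,
    )

    try:
        # annotate defaults to False, so schema errors are raised, not printed
        validate_cloudconfig_file(None, get_schema())
    except SchemaValidationError as err:
        print('Invalid system userdata:', err)
    except RuntimeError as err:
        # e.g. running as non-root, or no userdata written yet
        print(err)
    else:
        print('Valid cloud-config: system userdata')

On the command line the same check is simply
`sudo cloud-init devel schema --system --annotate`, as the updated faq.rst
hunk above documents.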