From 36f7ba0b3d16b0bd0b98ffd26b24826fe4cfc1c2 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 25 Feb 2020 13:46:05 -0700 Subject: tests: add focal integration tests for ubuntu (#225) --- tests/cloud_tests/releases.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index 7ddc5b85..83cacb47 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -131,6 +131,22 @@ features: releases: # UBUNTU ================================================================= + focal: + # EOL: Apr 2025 + default: + enabled: true + release: focal + version: 20.04 + os: ubuntu + feature_groups: + - base + - debian_base + - ubuntu_specific + lxd: + sstreams_server: https://cloud-images.ubuntu.com/daily + alias: focal + setup_overrides: null + override_templates: false eoan: # EOL: Jul 2020 default: -- cgit v1.2.3 From bedda2e6d1f2b696e0f927c153cb046e9e992db1 Mon Sep 17 00:00:00 2001 From: sab-systems Date: Tue, 25 Feb 2020 23:10:26 +0100 Subject: Add physical network type: cascading to openstack helpers (#200) * Add physical network type: cascading to openstack helpers * add new helpers test for checking all openstack KNOWN_PHYSICAL_TYPES get type 'physical'. --- cloudinit/sources/helpers/openstack.py | 1 + cloudinit/sources/helpers/tests/test_openstack.py | 44 +++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 cloudinit/sources/helpers/tests/test_openstack.py diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 441db506..e91398ea 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -68,6 +68,7 @@ KNOWN_PHYSICAL_TYPES = ( None, 'bgpovs', # not present in OpenStack upstream but used on OVH cloud. 'bridge', + 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud 'dvs', 'ethernet', 'hw_veb', diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py new file mode 100644 index 00000000..2bde1e3f --- /dev/null +++ b/cloudinit/sources/helpers/tests/test_openstack.py @@ -0,0 +1,44 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+# ./cloudinit/sources/helpers/tests/test_openstack.py + +from cloudinit.sources.helpers import openstack +from cloudinit.tests import helpers as test_helpers + + +class TestConvertNetJson(test_helpers.CiTestCase): + + def test_phy_types(self): + """Verify the different known physical types are handled.""" + # network_data.json example from + # https://docs.openstack.org/nova/latest/user/metadata.html + mac0 = "fa:16:3e:9c:bf:3d" + net_json = { + "links": [ + {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a", + "mtu": None, "type": "bridge", + "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"} + ], + "networks": [ + {"id": "network0", "link": "tapcd9f6d46-4a", + "network_id": "99e88329-f20d-4741-9593-25bf07847b16", + "type": "ipv4_dhcp"} + ], + "services": [{"address": "8.8.8.8", "type": "dns"}] + } + macs = {mac0: 'eth0'} + + expected = { + 'version': 1, + 'config': [ + {'mac_address': 'fa:16:3e:9c:bf:3d', + 'mtu': None, 'name': 'eth0', + 'subnets': [{'type': 'dhcp4'}], + 'type': 'physical'}, + {'address': '8.8.8.8', 'type': 'nameserver'}]} + + for t in openstack.KNOWN_PHYSICAL_TYPES: + net_json["links"][0]["type"] = t + self.assertEqual( + expected, + openstack.convert_net_json(network_json=net_json, + known_macs=macs)) -- cgit v1.2.3 From 4f940bd1f76f50f947af533661ba6fafa3e60e59 Mon Sep 17 00:00:00 2001 From: "Mark T. Voelker" Date: Thu, 27 Feb 2020 14:08:14 -0500 Subject: Fix docs for OpenStack DMI Asset Tag (#228) In cloud-init 19.2, we added the ability for cloud-init to detect OpenStack platforms by checking for "OpenStack Compute" or "OpenStack Nova" in the chassis asset tag. However, this was never reflected in the documentation. This patch updates the datasources documentation for OpenStack to reflect the possibility of using the chassis asset tag. LP: #1669875 --- doc/rtd/topics/datasources/openstack.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst index 8ce2a53d..ff817e45 100644 --- a/doc/rtd/topics/datasources/openstack.rst +++ b/doc/rtd/topics/datasources/openstack.rst @@ -19,7 +19,8 @@ checks the following environment attributes as a potential OpenStack platform: * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova* * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute* - * **DMI chassis_asset_tag** is *OpenTelekomCloud* + * **DMI chassis_asset_tag** is *OpenTelekomCloud* or *OpenStack Nova* + (since 19.2) or *OpenStack Compute* (since 19.2) Configuration -- cgit v1.2.3 From 0140f74dd577c6407197eb82ca47ad5f07cac4f4 Mon Sep 17 00:00:00 2001 From: Nick Wales <588472+nickwales@users.noreply.github.com> Date: Thu, 27 Feb 2020 13:34:24 -0600 Subject: Fixes typo on Amazon Web Services (#217) one line doc fix --- doc/rtd/topics/instancedata.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index e7dd0d62..4227c4fd 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -103,7 +103,7 @@ v1.cloud_name ------------- Where possible this will indicate the 'name' of the cloud the system is running on. This is different than the 'platform' item. For example, the cloud name of -Amazone Web Services is 'aws', while the platform is 'ec2'. +Amazon Web Services is 'aws', while the platform is 'ec2'. If determining a specific name is not possible or provided in meta-data, then this filed may contain the same content as 'platform'. 
-- cgit v1.2.3 From 67c8e53cc3fe007bb40d6e9c10549ca8200a9cd7 Mon Sep 17 00:00:00 2001 From: Alexey Vazhnov Date: Fri, 28 Feb 2020 01:16:29 +0300 Subject: docs: typo fixed: dta → data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/rtd/topics/format.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index 2b60bdd3..e3e5f8aa 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -126,7 +126,7 @@ Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using a MIME archive. .. note:: - New in cloud-init v. 18.4: Cloud config dta can also render cloud instance + New in cloud-init v. 18.4: Cloud config data can also render cloud instance metadata variables using jinja templating. See :ref:`instance_metadata` for more information. -- cgit v1.2.3 From fa1abfec27050a4fb71cad950a17e42f9b43b478 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 3 Mar 2020 15:23:33 -0700 Subject: ec2: only redact token request headers in logs, avoid altering request (#230) Our header redact logic was redacting both logged request headers and the actual source request. This results in DataSourceEc2 sending the invalid header "X-aws-ec2-metadata-token-ttl-seconds: REDACTED" which gets an HTTP status response of 400. Cloud-init retries this failed token request for 2 minutes before falling back to IMDSv1. LP: #1865882 --- cloudinit/tests/test_url_helper.py | 34 +++++++++++++++++++++++++++++++++- cloudinit/url_helper.py | 15 ++++++++------- 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index 1674120f..29b39374 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -1,7 +1,8 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.url_helper import ( - NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) + NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url, + retry_on_url_exc) from cloudinit.tests.helpers import CiTestCase, mock, skipIf from cloudinit import util from cloudinit import version @@ -50,6 +51,9 @@ class TestOAuthHeaders(CiTestCase): class TestReadFileOrUrl(CiTestCase): + + with_logs = True + def test_read_file_or_url_str_from_file(self): """Test that str(result.contents) on file is text version of contents. 
It should not be "b'data'", but just "'data'" """ @@ -71,6 +75,34 @@ class TestReadFileOrUrl(CiTestCase): self.assertEqual(result.contents, data) self.assertEqual(str(result), data.decode('utf-8')) + @httpretty.activate + def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self): + """Headers are redacted from logs but unredacted in requests.""" + url = 'http://hostname/path' + headers = {'sensitive': 'sekret', 'server': 'blah'} + httpretty.register_uri(httpretty.GET, url) + + read_file_or_url(url, headers=headers, headers_redact=['sensitive']) + logs = self.logs.getvalue() + for k in headers.keys(): + self.assertEqual(headers[k], httpretty.last_request().headers[k]) + self.assertIn(REDACTED, logs) + self.assertNotIn('sekret', logs) + + @httpretty.activate + def test_read_file_or_url_str_from_url_redacts_noheaders(self): + """When no headers_redact, header values are in logs and requests.""" + url = 'http://hostname/path' + headers = {'sensitive': 'sekret', 'server': 'blah'} + httpretty.register_uri(httpretty.GET, url) + + read_file_or_url(url, headers=headers) + for k in headers.keys(): + self.assertEqual(headers[k], httpretty.last_request().headers[k]) + logs = self.logs.getvalue() + self.assertNotIn(REDACTED, logs) + self.assertIn('sekret', logs) + @mock.patch(M_PATH + 'readurl') def test_read_file_or_url_passes_params_to_readurl(self, m_readurl): """read_file_or_url passes all params through to readurl.""" diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index eeb27aa8..f3c0cf9c 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -281,13 +281,14 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, for (k, v) in req_args.items(): if k == 'data': continue - filtered_req_args[k] = v - if k == 'headers': - for hkey, _hval in v.items(): - if hkey in headers_redact: - filtered_req_args[k][hkey] = ( - copy.deepcopy(req_args[k][hkey])) - filtered_req_args[k][hkey] = REDACTED + if k == 'headers' and headers_redact: + matched_headers = [k for k in headers_redact if v.get(k)] + if matched_headers: + filtered_req_args[k] = copy.deepcopy(v) + for key in matched_headers: + filtered_req_args[k][key] = REDACTED + else: + filtered_req_args[k] = v try: if log_req_resp: -- cgit v1.2.3 From 1d2dfc5d879dc905f440697c2b805c9485dda821 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 4 Mar 2020 13:53:19 -0700 Subject: net: support network-config:disabled on the kernel commandline (#232) Allow disabling cloud-init's network configuration via a plain-text kernel cmdline Cloud-init docs indicate that users can disable cloud-init networking via kernel command line parameter 'network-config='. This does not work unless the payload base64 encoded. Document the base64 encoding requirement and add a plain-text value for disabling cloud-init network config: network-config=disabled Also: - Log an error and ignore any plain-text network-config payloads that are not specifically 'network-config=disabled'. - Log a warning if network-config kernel param is invalid yaml but do not raise an exception, allowing boot to continue and use fallback networking. 
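For illustration only (not part of this change): assuming a v1 YAML payload and the
gzip-then-base64 encoding described above, a kernel commandline value could be
prepared roughly like this; the helper name and the eth0 example config are
illustrative, not cloud-init code.

```
# Illustrative sketch: build a network-config= kernel parameter value
# from a v1 YAML payload (gzip, then base64), per the documented format.
import base64
import gzip

network_config_yaml = """\
version: 1
config:
  - type: physical
    name: eth0
    subnets:
      - type: dhcp
"""

def encode_network_config(yaml_text):
    """Return the base64 string of the gzip-compressed YAML payload."""
    compressed = gzip.compress(yaml_text.encode("utf-8"))
    return base64.b64encode(compressed).decode("ascii")

print("network-config=%s" % encode_network_config(network_config_yaml))
# Plain-text form accepted after this change:
#   network-config=disabled
```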
LP: #1862702 --- cloudinit/net/cmdline.py | 34 ++++++++++++++++++++-------------- doc/rtd/topics/network-config.rst | 22 ++++++++++++++-------- tests/unittests/test_net.py | 17 +++++++++++++++++ 3 files changed, 51 insertions(+), 22 deletions(-) diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 64e1c699..814ce411 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -10,6 +10,7 @@ import base64 import glob import gzip import io +import logging import os from cloudinit import util @@ -19,6 +20,8 @@ from . import read_sys_net_safe _OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface" +KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED = "disabled" + class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta): """ABC for net config sources that read config written by initramfses""" @@ -233,34 +236,35 @@ def read_initramfs_config(): return None -def _decomp_gzip(blob, strict=True): - # decompress blob. raise exception if not compressed unless strict=False. +def _decomp_gzip(blob): + # decompress blob or return original blob with io.BytesIO(blob) as iobuf: gzfp = None try: gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf) return gzfp.read() except IOError: - if strict: - raise return blob finally: if gzfp: gzfp.close() -def _b64dgz(b64str, gzipped="try"): - # decode a base64 string. If gzipped is true, transparently uncompresss - # if gzipped is 'try', then try gunzip, returning the original on fail. - try: - blob = base64.b64decode(b64str) - except TypeError: - raise ValueError("Invalid base64 text: %s" % b64str) +def _b64dgz(data): + """Decode a string base64 encoding, if gzipped, uncompress as well - if not gzipped: - return blob + :return: decompressed unencoded string of the data or empty string on + unencoded data. + """ + try: + blob = base64.b64decode(data) + except (TypeError, ValueError): + logging.error( + "Expected base64 encoded kernel commandline parameter" + " network-config. Ignoring network-config=%s.", data) + return '' - return _decomp_gzip(blob, strict=gzipped != "try") + return _decomp_gzip(blob) def read_kernel_cmdline_config(cmdline=None): @@ -273,6 +277,8 @@ def read_kernel_cmdline_config(cmdline=None): if tok.startswith("network-config="): data64 = tok.split("=", 1)[1] if data64: + if data64 == KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED: + return {"config": "disabled"} return util.load_yaml(_b64dgz(data64)) return None diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 1520ba9a..0144dfae 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -25,17 +25,23 @@ For example, OpenStack may provide network config in the MetaData Service. **System Config** -A ``network:`` entry in /etc/cloud/cloud.cfg.d/* configuration files. +A ``network:`` entry in ``/etc/cloud/cloud.cfg.d/*`` configuration files. **Kernel Command Line** -``ip=`` or ``network-config=`` +``ip=`` or ``network-config=`` User-data cannot change an instance's network configuration. In the absence of network configuration in any of the above sources , `Cloud-init`_ will write out a network configuration that will issue a DHCP request on a "first" network interface. +.. note:: + + The network-config value is expected to be a Base64 encoded YAML string in + :ref:`network_config_v1` or :ref:`network_config_v2` format. Optionally it + can be compressed with ``gzip`` prior to Base64 encoding. 
+ Disabling Network Configuration =============================== @@ -48,19 +54,19 @@ on other methods, such as embedded configuration or other customizations. **Kernel Command Line** -`Cloud-init`_ will check for a parameter ``network-config`` and the -value is expected to be YAML string in the :ref:`network_config_v1` format. -The YAML string may optionally be ``Base64`` encoded, and optionally -compressed with ``gzip``. +`Cloud-init`_ will check additionally check for the parameter +``network-config=disabled`` which will automatically disable any network +configuration. Example disabling kernel command line entry: :: - network-config={config: disabled} + network-config=disabled **cloud config** -In the combined cloud-init configuration dictionary. :: +In the combined cloud-init configuration dictionary, merged from +``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``:: network: config: disabled diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index bedd05fe..e03857c4 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -4017,6 +4017,8 @@ class TestEniNetworkStateToEni(CiTestCase): class TestCmdlineConfigParsing(CiTestCase): + with_logs = True + simple_cfg = { 'config': [{"type": "physical", "name": "eth0", "mac_address": "c0:d6:9f:2c:e8:80", @@ -4066,6 +4068,21 @@ class TestCmdlineConfigParsing(CiTestCase): found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) self.assertEqual(found, self.simple_cfg) + def test_cmdline_with_net_config_disabled(self): + raw_cmdline = 'ro network-config=disabled root=foo' + found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) + self.assertEqual(found, {'config': 'disabled'}) + + def test_cmdline_with_net_config_unencoded_logs_error(self): + """network-config cannot be unencoded besides 'disabled'.""" + raw_cmdline = 'ro network-config={config:disabled} root=foo' + found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) + self.assertIsNone(found) + expected_log = ( + 'ERROR: Expected base64 encoded kernel commandline parameter' + ' network-config. Ignoring network-config={config:disabled}.') + self.assertIn(expected_log, self.logs.getvalue()) + def test_cmdline_with_b64_gz(self): data = _gzip_data(json.dumps(self.simple_cfg).encode()) encoded_text = base64.b64encode(data).decode() -- cgit v1.2.3 From fa639704f67539d9c1d8668383f755cb0213fd4a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 4 Mar 2020 15:19:43 -0700 Subject: instance-data: write redacted cfg to instance-data.json (#233) When cloud-init persisted instance metadata to instance-data.json if failed to redact the sensitive value. Currently, the only sensitive key 'security-credentials' is omitted as cloud-init does not fetch this value from IMDS. Fix this by properly redacting the content from the public instance-metadata.json file while retaining the value in the root-only instance-data-sensitive.json file. 
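The intended split can be sketched roughly as follows; the helper names and
file handling here are hypothetical illustrations of the behavior described
above, not the actual cloudinit.sources implementation.

```
# Illustrative sketch: write full metadata root-only, redacted copy
# world-readable. Names are hypothetical, not cloud-init's own helpers.
import copy
import json
import os

REDACTED = "redacted for non-root user"

def redact(metadata, sensitive_keys=("security-credentials",)):
    """Return a deep copy with values of sensitive keys replaced."""
    redacted = copy.deepcopy(metadata)
    for key, value in redacted.items():
        if key in sensitive_keys:
            redacted[key] = REDACTED
        elif isinstance(value, dict):
            redacted[key] = redact(value, sensitive_keys)
    return redacted

def write_instance_data(run_dir, processed_data):
    """Unredacted data is root-only (0600); redacted copy is 0644."""
    sensitive_path = os.path.join(run_dir, "instance-data-sensitive.json")
    public_path = os.path.join(run_dir, "instance-data.json")
    with open(sensitive_path, "w") as f:
        json.dump(processed_data, f, indent=1)
    os.chmod(sensitive_path, 0o600)
    with open(public_path, "w") as f:
        json.dump(redact(processed_data), f, indent=1)
    os.chmod(public_path, 0o644)
```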
LP: #1865947 --- cloudinit/sources/__init__.py | 8 ++--- cloudinit/sources/tests/test_init.py | 57 +++++++++++++++++++++++++++++++----- 2 files changed, 54 insertions(+), 11 deletions(-) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index dd93cfd8..805d803d 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -315,12 +315,12 @@ class DataSource(metaclass=abc.ABCMeta): except UnicodeDecodeError as e: LOG.warning('Error persisting instance-data.json: %s', str(e)) return False - json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) - write_json(json_file, processed_data) # World readable json_sensitive_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) - write_json(json_sensitive_file, - redact_sensitive_keys(processed_data), mode=0o600) + write_json(json_sensitive_file, processed_data, mode=0o600) + json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) + # World readable + write_json(json_file, redact_sensitive_keys(processed_data)) return True def _get_data(self): diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index f73b37ed..6db127e7 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -318,8 +318,8 @@ class TestDataSource(CiTestCase): self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) self.assertEqual(expected, util.load_json(content)) - def test_get_data_writes_json_instance_data_sensitive(self): - """get_data writes INSTANCE_JSON_SENSITIVE_FILE as readonly root.""" + def test_get_data_writes_redacted_public_json_instance_data(self): + """get_data writes redacted content to public INSTANCE_JSON_FILE.""" tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( self.sys_cfg, self.distro, Paths({'run_dir': tmp}), @@ -333,11 +333,53 @@ class TestDataSource(CiTestCase): ('security-credentials',), datasource.sensitive_metadata_keys) datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) - sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp) redacted = util.load_json(util.load_file(json_file)) + expected = { + 'base64_encoded_keys': [], + 'sensitive_keys': ['ds/meta_data/some/security-credentials'], + 'v1': { + '_beta_keys': ['subplatform'], + 'availability-zone': 'myaz', + 'availability_zone': 'myaz', + 'cloud-name': 'subclasscloudname', + 'cloud_name': 'subclasscloudname', + 'instance-id': 'iid-datasource', + 'instance_id': 'iid-datasource', + 'local-hostname': 'test-subclass-hostname', + 'local_hostname': 'test-subclass-hostname', + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'region': 'myregion', + 'subplatform': 'unknown'}, + 'ds': { + '_doc': EXPERIMENTAL_TEXT, + 'meta_data': { + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}} + } + self.assertEqual(expected, redacted) + file_stat = os.stat(json_file) + self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + + def test_get_data_writes_json_instance_data_sensitive(self): + """ + get_data writes unmodified data to sensitive file as root-readonly. 
+ """ + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp}), + custom_metadata={ + 'availability_zone': 'myaz', + 'local-hostname': 'test-subclass-hostname', + 'region': 'myregion', + 'some': {'security-credentials': { + 'cred1': 'sekret', 'cred2': 'othersekret'}}}) self.assertEqual( - {'cred1': 'sekret', 'cred2': 'othersekret'}, - redacted['ds']['meta_data']['some']['security-credentials']) + ('security-credentials',), datasource.sensitive_metadata_keys) + datasource.get_data() + sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp) content = util.load_file(sensitive_json_file) expected = { 'base64_encoded_keys': [], @@ -362,9 +404,10 @@ class TestDataSource(CiTestCase): 'availability_zone': 'myaz', 'local-hostname': 'test-subclass-hostname', 'region': 'myregion', - 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}} + 'some': { + 'security-credentials': + {'cred1': 'sekret', 'cred2': 'othersekret'}}}} } - self.maxDiff = None self.assertEqual(expected, util.load_json(content)) file_stat = os.stat(sensitive_json_file) self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) -- cgit v1.2.3 From 1f860e5ac7ebb5b809c72d8703a0b7cb3e84ccd0 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 5 Mar 2020 17:38:28 -0700 Subject: ec2: Do not fallback to IMDSv1 on EC2 (#216) The EC2 Data Source needs to handle 3 states of the Instance Metadata Service configured for a given instance: 1. HttpTokens : optional & HttpEndpoint : enabled Either IMDSv2 or IMDSv1 can be used. 2. HttpTokens : required & HttpEndpoint : enabled Calls to IMDS without a valid token (IMDSv1 or IMDSv2 with expired token) will return a 401 error. 3. HttpEndpoint : disabled The IMDS http endpoint will return a 403 error. Previous work to support IMDSv2 in cloud-init handled case 1 and case 2. This commit handles case 3 by bypassing the retry block when IMDS returns HTTP status code >= 400 on official AWS cloud platform. It shaves 2 minutes when rebooting an instance that has its IMDS http token endpoint disabled but creates some inconsistencies. An instance that doesn't set "manual_cache_clean" to "True" will have its /var/lib/cloud/instance symlink removed altogether after it has failed to find a datasource. --- cloudinit/sources/DataSourceEc2.py | 75 ++++++++++++++++++++++------- tests/unittests/test_datasource/test_ec2.py | 48 ++++++++++++++++++ 2 files changed, 106 insertions(+), 17 deletions(-) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 0f2bfef4..8f0d73bb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -29,7 +29,6 @@ STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") STRICT_ID_DEFAULT = "warn" API_TOKEN_ROUTE = 'latest/api/token' -API_TOKEN_DISABLED = '_ec2_disable_api_token' AWS_TOKEN_TTL_SECONDS = '21600' AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token' AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds' @@ -193,6 +192,12 @@ class DataSourceEc2(sources.DataSource): return self.metadata['instance-id'] def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None): + """ Get an API token for EC2 Instance Metadata Service. + + On EC2. IMDS will always answer an API token, unless + the instance owner has disabled the IMDS HTTP endpoint or + the network topology conflicts with the configured hop-limit. 
+ """ if self.cloud_name != CloudNames.AWS: return @@ -205,18 +210,33 @@ class DataSourceEc2(sources.DataSource): urls.append(cur) url2base[cur] = url - # use the self._status_cb to check for Read errors, which means - # we can't reach the API token URL, so we should disable IMDSv2 + # use the self._imds_exception_cb to check for Read errors LOG.debug('Fetching Ec2 IMDSv2 API Token') - url, response = uhelp.wait_for_url( - urls=urls, max_wait=1, timeout=1, status_cb=self._status_cb, - headers_cb=self._get_headers, request_method=request_method, - headers_redact=AWS_TOKEN_REDACT) + + response = None + url = None + url_params = self.get_url_params() + try: + url, response = uhelp.wait_for_url( + urls=urls, max_wait=url_params.max_wait_seconds, + timeout=url_params.timeout_seconds, status_cb=LOG.warning, + headers_cb=self._get_headers, + exception_cb=self._imds_exception_cb, + request_method=request_method, + headers_redact=AWS_TOKEN_REDACT) + except uhelp.UrlError: + # We use the raised exception to interupt the retry loop. + # Nothing else to do here. + pass if url and response: self._api_token = response return url2base[url] + # If we get here, then wait_for_url timed out, waiting for IMDS + # or the IMDS HTTP endpoint is disabled + return None + def wait_for_metadata_service(self): mcfg = self.ds_cfg @@ -240,9 +260,11 @@ class DataSourceEc2(sources.DataSource): # try the api token path first metadata_address = self._maybe_fetch_api_token(mdurls) - if not metadata_address: - if self._api_token == API_TOKEN_DISABLED: - LOG.warning('Retrying with IMDSv1') + # When running on EC2, we always access IMDS with an API token. + # If we could not get an API token, then we assume the IMDS + # endpoint was disabled and we move on without a data source. + # Fallback to IMDSv1 if not running on EC2 + if not metadata_address and self.cloud_name != CloudNames.AWS: # if we can't get a token, use instance-id path urls = [] url2base = {} @@ -267,6 +289,8 @@ class DataSourceEc2(sources.DataSource): if metadata_address: self.metadata_address = metadata_address LOG.debug("Using metadata source: '%s'", self.metadata_address) + elif self.cloud_name == CloudNames.AWS: + LOG.warning("IMDS's HTTP endpoint is probably disabled") else: LOG.critical("Giving up on md from %s after %s seconds", urls, int(time.time() - start_time)) @@ -496,11 +520,29 @@ class DataSourceEc2(sources.DataSource): self._api_token = None return True # always retry - def _status_cb(self, msg, exc=None): - LOG.warning(msg) - if 'Read timed out' in msg: - LOG.warning('Cannot use Ec2 IMDSv2 API tokens, using IMDSv1') - self._api_token = API_TOKEN_DISABLED + def _imds_exception_cb(self, msg, exception=None): + """Fail quickly on proper AWS if IMDSv2 rejects API token request + + Guidance from Amazon is that if IMDSv2 had disabled token requests + by returning a 403, or cloud-init malformed requests resulting in + other 40X errors, we want the datasource detection to fail quickly + without retries as those symptoms will likely not be resolved by + retries. + + Exceptions such as requests.ConnectionError due to IMDS being + temporarily unroutable or unavailable will still retry due to the + callsite wait_for_url. + """ + if isinstance(exception, uhelp.UrlError): + # requests.ConnectionError will have exception.code == None + if exception.code and exception.code >= 400: + if exception.code == 403: + LOG.warning('Ec2 IMDS endpoint returned a 403 error. ' + 'HTTP endpoint is disabled. 
Aborting.') + else: + LOG.warning('Fatal error while requesting ' + 'Ec2 IMDSv2 API tokens') + raise exception def _get_headers(self, url=''): """Return a dict of headers for accessing a url. @@ -508,8 +550,7 @@ class DataSourceEc2(sources.DataSource): If _api_token is unset on AWS, attempt to refresh the token via a PUT and then return the updated token header. """ - if self.cloud_name != CloudNames.AWS or (self._api_token == - API_TOKEN_DISABLED): + if self.cloud_name != CloudNames.AWS: return {} # Request a 6 hour token if URL is API_TOKEN_ROUTE request_token_header = {AWS_TOKEN_REQ_HEADER: AWS_TOKEN_TTL_SECONDS} diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 2a96122f..78e82c7e 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -3,6 +3,7 @@ import copy import httpretty import json +import requests from unittest import mock from cloudinit import helpers @@ -200,6 +201,7 @@ def register_mock_metaserver(base_url, data): class TestEc2(test_helpers.HttprettyTestCase): with_logs = True + maxDiff = None valid_platform_data = { 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', @@ -429,6 +431,52 @@ class TestEc2(test_helpers.HttprettyTestCase): self.assertTrue(ds.get_data()) self.assertFalse(ds.is_classic_instance()) + def test_aws_inaccessible_imds_service_fails_with_retries(self): + """Inaccessibility of http://169.254.169.254 are retried.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md=None) + + conn_error = requests.exceptions.ConnectionError( + '[Errno 113] no route to host') + + mock_success = mock.MagicMock(contents=b'fakesuccess') + mock_success.ok.return_value = True + + with mock.patch('cloudinit.url_helper.readurl') as m_readurl: + m_readurl.side_effect = (conn_error, conn_error, mock_success) + with mock.patch('cloudinit.url_helper.time.sleep'): + self.assertTrue(ds.wait_for_metadata_service()) + + # Just one /latest/api/token request + self.assertEqual(3, len(m_readurl.call_args_list)) + for readurl_call in m_readurl.call_args_list: + self.assertIn('latest/api/token', readurl_call[0][0]) + + def test_aws_token_403_fails_without_retries(self): + """Verify that 403s fetching AWS tokens are not retried.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md=None) + token_url = self.data_url('latest', data_item='api/token') + httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403) + self.assertFalse(ds.get_data()) + # Just one /latest/api/token request + logs = self.logs.getvalue() + failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0' + expected_logs = [ + 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is' + ' disabled. Aborting.', + "WARNING: IMDS's HTTP endpoint is probably disabled", + failed_put_log + ] + for log in expected_logs: + self.assertIn(log, logs) + self.assertEqual( + 1, len([l for l in logs.splitlines() if failed_put_log in l])) + def test_aws_token_redacted(self): """Verify that aws tokens are redacted when logged.""" ds = self._setup_ds( -- cgit v1.2.3 From 71af48df3514ca831c90b77dc71ba0a121dec401 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 10 Mar 2020 08:22:22 -0600 Subject: instance-data: add cloud-init merged_cfg and sys_info keys to json (#214) Cloud-config userdata provided as jinja templates are now distro, platform and merged cloud config aware. 
The cloud-init query command will also surface this config data. Now users can selectively render portions of cloud-config based on: * distro name, version, release * python version * merged cloud config values * machine platform * kernel To support template handling of this config, add new top-level keys to /run/cloud-init/instance-data.json. The new 'merged_cfg' key represents merged cloud config from /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/*. The new 'sys_info' key which captures distro and platform info from cloudinit.util.system_info. Cloud config userdata templates can render conditional content based on these additional environmental checks such as the following simple example: ``` ## template: jinja #cloud-config runcmd: {% if distro == 'opensuse' %} - sh /custom-setup-sles {% elif distro == 'centos' %} - sh /custom-setup-centos {% elif distro == 'debian' %} - sh /custom-setup-debian {% endif %} ``` To see all values: sudo cloud-init query --all Any keys added to the standardized v1 keys are guaranteed to not change or drop on future released of cloud-init. 'v1' keys will be retained for backward-compatibility even if a new standardized 'v2' set of keys are introduced The following standardized v1 keys are added: * distro, distro_release, distro_version, kernel_version, machine, python_version, system_platform, variant LP: #1865969 --- cloudinit/sources/__init__.py | 43 ++- cloudinit/sources/tests/test_init.py | 98 +++++- cloudinit/util.py | 2 +- doc/rtd/topics/instancedata.rst | 360 +++++++++++++++------ tests/cloud_tests/testcases/base.py | 55 +++- tests/unittests/test_datasource/test_cloudsigma.py | 6 +- 6 files changed, 426 insertions(+), 138 deletions(-) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 805d803d..a6e6d202 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -89,26 +89,26 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()): @return Dict copy of processed metadata. 
""" md_copy = copy.deepcopy(metadata) - md_copy['base64_encoded_keys'] = [] - md_copy['sensitive_keys'] = [] + base64_encoded_keys = [] + sens_keys = [] for key, val in metadata.items(): if key_path: sub_key_path = key_path + '/' + key else: sub_key_path = key if key in sensitive_keys or sub_key_path in sensitive_keys: - md_copy['sensitive_keys'].append(sub_key_path) + sens_keys.append(sub_key_path) if isinstance(val, str) and val.startswith('ci-b64:'): - md_copy['base64_encoded_keys'].append(sub_key_path) + base64_encoded_keys.append(sub_key_path) md_copy[key] = val.replace('ci-b64:', '') if isinstance(val, dict): return_val = process_instance_metadata( val, sub_key_path, sensitive_keys) - md_copy['base64_encoded_keys'].extend( - return_val.pop('base64_encoded_keys')) - md_copy['sensitive_keys'].extend( - return_val.pop('sensitive_keys')) + base64_encoded_keys.extend(return_val.pop('base64_encoded_keys')) + sens_keys.extend(return_val.pop('sensitive_keys')) md_copy[key] = return_val + md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys) + md_copy['sensitive_keys'] = sorted(sens_keys) return md_copy @@ -193,7 +193,7 @@ class DataSource(metaclass=abc.ABCMeta): # N-tuple of keypaths or keynames redact from instance-data.json for # non-root users - sensitive_metadata_keys = ('security-credentials',) + sensitive_metadata_keys = ('merged_cfg', 'security-credentials',) def __init__(self, sys_cfg, distro, paths, ud_proc=None): self.sys_cfg = sys_cfg @@ -218,14 +218,15 @@ class DataSource(metaclass=abc.ABCMeta): def __str__(self): return type_utils.obj_name(self) - def _get_standardized_metadata(self): + def _get_standardized_metadata(self, instance_data): """Return a dictionary of standardized metadata keys.""" local_hostname = self.get_hostname() instance_id = self.get_instance_id() availability_zone = self.availability_zone # In the event of upgrade from existing cloudinit, pickled datasource # will not contain these new class attributes. So we need to recrawl - # metadata to discover that content. + # metadata to discover that content + sysinfo = instance_data["sys_info"] return { 'v1': { '_beta_keys': ['subplatform'], @@ -233,14 +234,22 @@ class DataSource(metaclass=abc.ABCMeta): 'availability_zone': availability_zone, 'cloud-name': self.cloud_name, 'cloud_name': self.cloud_name, + 'distro': sysinfo["dist"][0], + 'distro_version': sysinfo["dist"][1], + 'distro_release': sysinfo["dist"][2], 'platform': self.platform_type, 'public_ssh_keys': self.get_public_ssh_keys(), + 'python_version': sysinfo["python"], 'instance-id': instance_id, 'instance_id': instance_id, + 'kernel_release': sysinfo["uname"][2], 'local-hostname': local_hostname, 'local_hostname': local_hostname, + 'machine': sysinfo["uname"][4], 'region': self.region, - 'subplatform': self.subplatform}} + 'subplatform': self.subplatform, + 'system_platform': sysinfo["platform"], + 'variant': sysinfo["variant"]}} def clear_cached_attrs(self, attr_defaults=()): """Reset any cached metadata attributes to datasource defaults. 
@@ -299,9 +308,15 @@ class DataSource(metaclass=abc.ABCMeta): ec2_metadata = getattr(self, 'ec2_metadata') if ec2_metadata != UNSET: instance_data['ds']['ec2_metadata'] = ec2_metadata - instance_data.update( - self._get_standardized_metadata()) instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT + # Add merged cloud.cfg and sys info for jinja templates and cli query + instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg) + instance_data['merged_cfg']['_doc'] = ( + 'Merged cloud-init system config from /etc/cloud/cloud.cfg and' + ' /etc/cloud/cloud.cfg.d/') + instance_data['sys_info'] = util.system_info() + instance_data.update( + self._get_standardized_metadata(instance_data)) try: # Process content base64encoding unserializable values content = util.json_dumps(instance_data) diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 6db127e7..541cbbeb 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -55,6 +55,7 @@ class InvalidDataSourceTestSubclassNet(DataSource): class TestDataSource(CiTestCase): with_logs = True + maxDiff = None def setUp(self): super(TestDataSource, self).setUp() @@ -288,27 +289,47 @@ class TestDataSource(CiTestCase): tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - datasource.get_data() + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) content = util.load_file(json_file) expected = { 'base64_encoded_keys': [], - 'sensitive_keys': [], + 'merged_cfg': REDACT_SENSITIVE_VALUE, + 'sensitive_keys': ['merged_cfg'], + 'sys_info': sys_info, 'v1': { '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', 'instance-id': 'iid-datasource', 'instance_id': 'iid-datasource', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', + 'kernel_release': '5.4.0-24-generic', + 'machine': 'x86_64', 'platform': 'mytestsubclass', 'public_ssh_keys': [], + 'python_version': '3.7', 'region': 'myregion', - 'subplatform': 'unknown'}, + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'subplatform': 'unknown', + 'variant': 'ubuntu'}, 'ds': { + '_doc': EXPERIMENTAL_TEXT, 'meta_data': {'availability_zone': 'myaz', 'local-hostname': 'test-subclass-hostname', @@ -329,28 +350,49 @@ class TestDataSource(CiTestCase): 'region': 'myregion', 'some': {'security-credentials': { 'cred1': 'sekret', 'cred2': 'othersekret'}}}) - self.assertEqual( - ('security-credentials',), datasource.sensitive_metadata_keys) - datasource.get_data() + self.assertItemsEqual( + ('merged_cfg', 'security-credentials',), + datasource.sensitive_metadata_keys) + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() json_file = 
self.tmp_path(INSTANCE_JSON_FILE, tmp) redacted = util.load_json(util.load_file(json_file)) expected = { 'base64_encoded_keys': [], - 'sensitive_keys': ['ds/meta_data/some/security-credentials'], + 'merged_cfg': REDACT_SENSITIVE_VALUE, + 'sensitive_keys': [ + 'ds/meta_data/some/security-credentials', 'merged_cfg'], + 'sys_info': sys_info, 'v1': { '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', 'instance-id': 'iid-datasource', 'instance_id': 'iid-datasource', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', + 'kernel_release': '5.4.0-24-generic', + 'machine': 'x86_64', 'platform': 'mytestsubclass', 'public_ssh_keys': [], + 'python_version': '3.7', 'region': 'myregion', - 'subplatform': 'unknown'}, + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'subplatform': 'unknown', + 'variant': 'ubuntu'}, 'ds': { '_doc': EXPERIMENTAL_TEXT, 'meta_data': { @@ -359,7 +401,7 @@ class TestDataSource(CiTestCase): 'region': 'myregion', 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}} } - self.assertEqual(expected, redacted) + self.assertItemsEqual(expected, redacted) file_stat = os.stat(json_file) self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) @@ -376,28 +418,54 @@ class TestDataSource(CiTestCase): 'region': 'myregion', 'some': {'security-credentials': { 'cred1': 'sekret', 'cred2': 'othersekret'}}}) - self.assertEqual( - ('security-credentials',), datasource.sensitive_metadata_keys) - datasource.get_data() + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + + self.assertItemsEqual( + ('merged_cfg', 'security-credentials',), + datasource.sensitive_metadata_keys) + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp) content = util.load_file(sensitive_json_file) expected = { 'base64_encoded_keys': [], - 'sensitive_keys': ['ds/meta_data/some/security-credentials'], + 'merged_cfg': { + '_doc': ( + 'Merged cloud-init system config from ' + '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'), + 'datasource': {'_undef': {'key1': False}}}, + 'sensitive_keys': [ + 'ds/meta_data/some/security-credentials', 'merged_cfg'], + 'sys_info': sys_info, 'v1': { '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', 'instance-id': 'iid-datasource', 'instance_id': 'iid-datasource', + 'kernel_release': '5.4.0-24-generic', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', + 'machine': 'x86_64', 'platform': 'mytestsubclass', 'public_ssh_keys': [], + 'python_version': '3.7', 'region': 'myregion', - 'subplatform': 'unknown'}, + 'subplatform': 'unknown', + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'variant': 'ubuntu'}, 'ds': { '_doc': EXPERIMENTAL_TEXT, 'meta_data': { @@ -408,7 +476,7 @@ class TestDataSource(CiTestCase): 'security-credentials': {'cred1': 'sekret', 'cred2': 'othersekret'}}}} } - 
self.assertEqual(expected, util.load_json(content)) + self.assertItemsEqual(expected, util.load_json(content)) file_stat = os.stat(sensitive_json_file) self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) self.assertEqual(expected, util.load_json(content)) diff --git a/cloudinit/util.py b/cloudinit/util.py index c02b3d9a..132f6051 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -656,7 +656,7 @@ def system_info(): 'system': platform.system(), 'release': platform.release(), 'python': platform.python_version(), - 'uname': platform.uname(), + 'uname': list(platform.uname()), 'dist': get_linux_distro() } system = info['system'].lower() diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 4227c4fd..845098bb 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -76,6 +76,11 @@ There are three basic top-level keys: 'security sensitive'. Only the keys listed here will be redacted from instance-data.json for non-root users. +* **merged_cfg**: Merged cloud-init 'system_config' from `/etc/cloud/cloud.cfg` + and `/etc/cloud/cloud-cfg.d`. Values under this key could contain sensitive + information such as passwords, so it is included in the **sensitive-keys** + list which is only readable by root. + * **ds**: Datasource-specific metadata crawled for the specific cloud platform. It should closely represent the structure of the cloud metadata crawled. The structure of content and details provided are entirely @@ -83,6 +88,9 @@ There are three basic top-level keys: The content exposed under the 'ds' key is currently **experimental** and expected to change slightly in the upcoming cloud-init release. +* **sys_info**: Information about the underlying os, python, architecture and + kernel. This represents the data collected by `cloudinit.util.system_info`. + * **v1**: Standardized cloud-init metadata keys, these keys are guaranteed to exist on all cloud platforms. They will also retain their current behavior and format and will be carried forward even if cloud-init introduces a new @@ -117,6 +125,21 @@ Example output: - nocloud - ovf +v1.distro, v1.distro_version, v1.distro_release +----------------------------------------------- +This shall be the distro name, version and release as determined by +`cloudinit.util.get_linux_distro`. + +Example output: + +- centos, 7.5, core +- debian, 9, stretch +- freebsd, 12.0-release-p10, +- opensuse, 42.3, x86_64 +- opensuse-tumbleweed, 20180920, x86_64 +- redhat, 7.5, 'maipo' +- sles, 12.3, x86_64 +- ubuntu, 20.04, focal v1.instance_id -------------- @@ -126,6 +149,14 @@ Examples output: - i- +v1.kernel_release +----------------- +This shall be the running kernel `uname -r` + +Example output: + +- 5.3.0-1010-aws + v1.local_hostname ----------------- The internal or local hostname of the system. @@ -135,6 +166,17 @@ Examples output: - ip-10-41-41-70 - +v1.machine +---------- +This shall be the running cpu machine architecture `uname -m` + +Example output: + +- x86_64 +- i686 +- ppc64le +- s390x + v1.platform ------------- An attempt to identify the cloud platfrom instance that the system is running @@ -154,7 +196,7 @@ v1.subplatform Additional platform details describing the specific source or type of metadata used. The format of subplatform will be: -`` (`` +`` ()`` Examples output: @@ -171,6 +213,15 @@ Examples output: - ['ssh-rsa AA...', ...] 
+v1.python_version +----------------- +The version of python that is running cloud-init as determined by +`cloudinit.util.system_info` + +Example output: + +- 3.7.6 + v1.region --------- The physical region/data center in which the instance is deployed. @@ -192,164 +243,265 @@ Examples output: Example Output -------------- -Below is an example of ``/run/cloud-init/instance_data.json`` on an EC2 -instance: +Below is an example of ``/run/cloud-init/instance-data-sensitive.json`` on an +EC2 instance: .. sourcecode:: json { + "_beta_keys": [ + "subplatform" + ], + "availability_zone": "us-east-1b", "base64_encoded_keys": [], + "merged_cfg": { + "_doc": "Merged cloud-init system config from /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/", + "_log": [ + "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n", + "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n", + "[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=(\"/dev/log\", handlers.SysLogHandler.LOG_USER)\n" + ], + "cloud_config_modules": [ + "emit_upstart", + "snap", + "ssh-import-id", + "locale", + "set-passwords", + "grub-dpkg", + "apt-pipelining", + "apt-configure", + "ubuntu-advantage", + "ntp", + "timezone", + "disable-ec2-metadata", + "runcmd", + "byobu" + ], + "cloud_final_modules": [ + "package-update-upgrade-install", + "fan", + "landscape", + "lxd", + "ubuntu-drivers", + "puppet", + "chef", + "mcollective", + "salt-minion", + "rightscale_userdata", + "scripts-vendor", + "scripts-per-once", + "scripts-per-boot", + "scripts-per-instance", + "scripts-user", + "ssh-authkey-fingerprints", + "keys-to-console", + "phone-home", + "final-message", + "power-state-change" + ], + "cloud_init_modules": [ + "migrator", + "seed_random", + "bootcmd", + "write-files", + "growpart", + "resizefs", + "disk_setup", + "mounts", + "set_hostname", + "update_hostname", + "update_etc_hosts", + "ca-certs", + "rsyslog", + "users-groups", + "ssh" + ], + "datasource_list": [ + "Ec2", + "None" + ], + "def_log_file": "/var/log/cloud-init.log", + "disable_root": true, + "log_cfgs": [ + [ + "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n", + "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n" + ] + ], + "output": { + "all": "| tee -a /var/log/cloud-init-output.log" + }, + "preserve_hostname": false, + "syslog_fix_perms": [ + 
"syslog:adm", + "root:adm", + "root:wheel", + "root:root" + ], + "users": [ + "default" + ], + "vendor_data": { + "enabled": true, + "prefix": [] + } + }, + "cloud_name": "aws", + "distro": "ubuntu", + "distro_release": "focal", + "distro_version": "20.04", "ds": { "_doc": "EXPERIMENTAL: The structure and format of content scoped under the 'ds' key may change in subsequent releases of cloud-init.", "_metadata_api_version": "2016-09-02", "dynamic": { - "instance-identity": { + "instance_identity": { "document": { - "accountId": "437526006925", + "accountId": "329910648901", "architecture": "x86_64", - "availabilityZone": "us-east-2b", + "availabilityZone": "us-east-1b", "billingProducts": null, "devpayProductCodes": null, - "imageId": "ami-079638aae7046bdd2", - "instanceId": "i-075f088c72ad3271c", + "imageId": "ami-02e8aa396f8be3b6d", + "instanceId": "i-0929128ff2f73a2f1", "instanceType": "t2.micro", "kernelId": null, "marketplaceProductCodes": null, - "pendingTime": "2018-10-05T20:10:43Z", - "privateIp": "10.41.41.95", + "pendingTime": "2020-02-27T20:46:18Z", + "privateIp": "172.31.81.43", "ramdiskId": null, - "region": "us-east-2", + "region": "us-east-1", "version": "2017-09-30" }, "pkcs7": [ - "MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCAJIAEggHbewog", - "ICJkZXZwYXlQcm9kdWN0Q29kZXMiIDogbnVsbCwKICAibWFya2V0cGxhY2VQcm9kdWN0Q29kZXMi", - "IDogbnVsbCwKICAicHJpdmF0ZUlwIiA6ICIxMC40MS40MS45NSIsCiAgInZlcnNpb24iIDogIjIw", - "MTctMDktMzAiLAogICJpbnN0YW5jZUlkIiA6ICJpLTA3NWYwODhjNzJhZDMyNzFjIiwKICAiYmls", - "bGluZ1Byb2R1Y3RzIiA6IG51bGwsCiAgImluc3RhbmNlVHlwZSIgOiAidDIubWljcm8iLAogICJh", - "Y2NvdW50SWQiIDogIjQzNzUyNjAwNjkyNSIsCiAgImF2YWlsYWJpbGl0eVpvbmUiIDogInVzLWVh", - "c3QtMmIiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJyYW1kaXNrSWQiIDogbnVsbCwKICAiYXJj", - "aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJpbWFnZUlkIiA6ICJhbWktMDc5NjM4YWFlNzA0NmJk", - "ZDIiLAogICJwZW5kaW5nVGltZSIgOiAiMjAxOC0xMC0wNVQyMDoxMDo0M1oiLAogICJyZWdpb24i", - "IDogInVzLWVhc3QtMiIKfQAAAAAAADGCARcwggETAgEBMGkwXDELMAkGA1UEBhMCVVMxGTAXBgNV", - "BAgTEFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0FtYXpvbiBX", - "ZWIgU2VydmljZXMgTExDAgkAlrpI2eVeGmcwCQYFKw4DAhoFAKBdMBgGCSqGSIb3DQEJAzELBgkq", - "hkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8XDTE4MTAwNTIwMTA0OFowIwYJKoZIhvcNAQkEMRYEFK0k", - "Tz6n1A8/zU1AzFj0riNQORw2MAkGByqGSM44BAMELjAsAhRNrr174y98grPBVXUforN/6wZp8AIU", - "JLZBkrB2GJA8A4WJ1okq++jSrBIAAAAAAAA=" + "MIAGCSqGSIb3DQ...", + "REDACTED", + "AhQUgq0iPWqPTVnT96tZE6L1XjjLHQAAAAAAAA==" ], "rsa2048": [ - "MIAGCSqGSIb3DQEHAqCAMIACAQExDzANBglghkgBZQMEAgEFADCABgkqhkiG9w0BBwGggCSABIIB", - "23sKICAiZGV2cGF5UHJvZHVjdENvZGVzIiA6IG51bGwsCiAgIm1hcmtldHBsYWNlUHJvZHVjdENv", - "ZGVzIiA6IG51bGwsCiAgInByaXZhdGVJcCIgOiAiMTAuNDEuNDEuOTUiLAogICJ2ZXJzaW9uIiA6", - "ICIyMDE3LTA5LTMwIiwKICAiaW5zdGFuY2VJZCIgOiAiaS0wNzVmMDg4YzcyYWQzMjcxYyIsCiAg", - "ImJpbGxpbmdQcm9kdWN0cyIgOiBudWxsLAogICJpbnN0YW5jZVR5cGUiIDogInQyLm1pY3JvIiwK", - "ICAiYWNjb3VudElkIiA6ICI0Mzc1MjYwMDY5MjUiLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1", - "cy1lYXN0LTJiIiwKICAia2VybmVsSWQiIDogbnVsbCwKICAicmFtZGlza0lkIiA6IG51bGwsCiAg", - "ImFyY2hpdGVjdHVyZSIgOiAieDg2XzY0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLTA3OTYzOGFhZTcw", - "NDZiZGQyIiwKICAicGVuZGluZ1RpbWUiIDogIjIwMTgtMTAtMDVUMjA6MTA6NDNaIiwKICAicmVn", - "aW9uIiA6ICJ1cy1lYXN0LTIiCn0AAAAAAAAxggH/MIIB+wIBATBpMFwxCzAJBgNVBAYTAlVTMRkw", - "FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6", - "b24gV2ViIFNlcnZpY2VzIExMQwIJAM07oeX4xevdMA0GCWCGSAFlAwQCAQUAoGkwGAYJKoZIhvcN", - 
"AQkDMQsGCSqGSIb3DQEHATAcBgkqhkiG9w0BCQUxDxcNMTgxMDA1MjAxMDQ4WjAvBgkqhkiG9w0B", - "CQQxIgQgkYz0pZk3zJKBi4KP4egeOKJl/UYwu5UdE7id74pmPwMwDQYJKoZIhvcNAQEBBQAEggEA", - "dC3uIGGNul1OC1mJKSH3XoBWsYH20J/xhIdftYBoXHGf2BSFsrs9ZscXd2rKAKea4pSPOZEYMXgz", - "lPuT7W0WU89N3ZKviy/ReMSRjmI/jJmsY1lea6mlgcsJXreBXFMYucZvyeWGHdnCjamoKWXkmZlM", - "mSB1gshWy8Y7DzoKviYPQZi5aI54XK2Upt4kGme1tH1NI2Cq+hM4K+adxTbNhS3uzvWaWzMklUuU", - "QHX2GMmjAVRVc8vnA8IAsBCJJp+gFgYzi09IK+cwNgCFFPADoG6jbMHHf4sLB3MUGpiA+G9JlCnM", - "fmkjI2pNRB8spc0k4UG4egqLrqCz67WuK38tjwAAAAAAAA==" + "MIAGCSqGSIb...", + "REDACTED", + "clYQvuE45xXm7Yreg3QtQbrP//owl1eZHj6s350AAAAAAAA=" ], "signature": [ - "Tsw6h+V3WnxrNVSXBYIOs1V4j95YR1mLPPH45XnhX0/Ei3waJqf7/7EEKGYP1Cr4PTYEULtZ7Mvf", - "+xJpM50Ivs2bdF7o0c4vnplRWe3f06NI9pv50dr110j/wNzP4MZ1pLhJCqubQOaaBTF3LFutgRrt", - "r4B0mN3p7EcqD8G+ll0=" + "dA+QV+LLCWCRNddnrKleYmh2GvYo+t8urDkdgmDSsPi", + "REDACTED", + "kDT4ygyJLFkd3b4qjAs=" ] } }, - "meta-data": { - "ami-id": "ami-079638aae7046bdd2", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": { + "meta_data": { + "ami_id": "ami-02e8aa396f8be3b6d", + "ami_launch_index": "0", + "ami_manifest_path": "(unknown)", + "block_device_mapping": { "ami": "/dev/sda1", - "ephemeral0": "sdb", - "ephemeral1": "sdc", "root": "/dev/sda1" }, - "hostname": "ip-10-41-41-95.us-east-2.compute.internal", - "instance-action": "none", - "instance-id": "i-075f088c72ad3271c", - "instance-type": "t2.micro", - "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal", - "local-ipv4": "10.41.41.95", - "mac": "06:74:8f:39:cd:a6", + "hostname": "ip-172-31-81-43.ec2.internal", + "instance_action": "none", + "instance_id": "i-0929128ff2f73a2f1", + "instance_type": "t2.micro", + "local_hostname": "ip-172-31-81-43.ec2.internal", + "local_ipv4": "172.31.81.43", + "mac": "12:7e:c9:93:29:af", "metrics": { "vhostmd": "" }, "network": { "interfaces": { "macs": { - "06:74:8f:39:cd:a6": { - "device-number": "0", - "interface-id": "eni-052058bbd7831eaae", - "ipv4-associations": { - "18.218.221.122": "10.41.41.95" - }, - "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal", - "local-ipv4s": "10.41.41.95", - "mac": "06:74:8f:39:cd:a6", - "owner-id": "437526006925", - "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com", - "public-ipv4s": "18.218.221.122", - "security-group-ids": "sg-828247e9", - "security-groups": "Cloud-init integration test secgroup", - "subnet-id": "subnet-282f3053", - "subnet-ipv4-cidr-block": "10.41.41.0/24", - "subnet-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/64", - "vpc-id": "vpc-252ef24d", - "vpc-ipv4-cidr-block": "10.41.0.0/16", - "vpc-ipv4-cidr-blocks": "10.41.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/56" - } + "12:7e:c9:93:29:af": { + "device_number": "0", + "interface_id": "eni-0c07a0474339b801d", + "ipv4_associations": { + "3.89.187.177": "172.31.81.43" + }, + "local_hostname": "ip-172-31-81-43.ec2.internal", + "local_ipv4s": "172.31.81.43", + "mac": "12:7e:c9:93:29:af", + "owner_id": "329910648901", + "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com", + "public_ipv4s": "3.89.187.177", + "security_group_ids": "sg-0100038b68aa79986", + "security_groups": "launch-wizard-3", + "subnet_id": "subnet-04e2d12a", + "subnet_ipv4_cidr_block": "172.31.80.0/20", + "vpc_id": "vpc-210b4b5b", + "vpc_ipv4_cidr_block": "172.31.0.0/16", + "vpc_ipv4_cidr_blocks": "172.31.0.0/16" + } } } }, "placement": { - "availability-zone": "us-east-2b" + "availability_zone": "us-east-1b" }, "profile": 
"default-hvm", - "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com", - "public-ipv4": "18.218.221.122", - "public-keys": { - "cloud-init-integration": [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration" - ] - }, - "reservation-id": "r-0594a20e31f6cfe46", - "security-groups": "Cloud-init integration test secgroup", + "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com", + "public_ipv4": "3.89.187.177", + "reservation_id": "r-0c481643d15766a02", + "security_groups": "launch-wizard-3", "services": { "domain": "amazonaws.com", "partition": "aws" } } }, + "instance_id": "i-0929128ff2f73a2f1", + "kernel_release": "5.3.0-1010-aws", + "local_hostname": "ip-172-31-81-43", + "machine": "x86_64", + "platform": "ec2", + "public_ssh_keys": [], + "python_version": "3.7.6", + "region": "us-east-1", "sensitive_keys": [], + "subplatform": "metadata (http://169.254.169.254)", + "sys_info": { + "dist": [ + "ubuntu", + "20.04", + "focal" + ], + "platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal", + "python": "3.7.6", + "release": "5.3.0-1010-aws", + "system": "Linux", + "uname": [ + "Linux", + "ip-172-31-81-43", + "5.3.0-1010-aws", + "#11-Ubuntu SMP Thu Jan 16 07:59:32 UTC 2020", + "x86_64", + "x86_64" + ], + "variant": "ubuntu" + }, + "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal", + "userdata": "#cloud-config\nssh_import_id: []\n...", "v1": { "_beta_keys": [ "subplatform" ], - "availability-zone": "us-east-2b", - "availability_zone": "us-east-2b", + "availability_zone": "us-east-1b", "cloud_name": "aws", - "instance_id": "i-075f088c72ad3271c", - "local_hostname": "ip-10-41-41-95", + "distro": "ubuntu", + "distro_release": "focal", + "distro_version": "20.04", + "instance_id": "i-0929128ff2f73a2f1", + "kernel": "5.3.0-1010-aws", + "local_hostname": "ip-172-31-81-43", + "machine": "x86_64", "platform": "ec2", - "public_ssh_keys": [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration" - ], - "region": "us-east-2", - "subplatform": "metadata (http://169.254.169.254)" - } + "public_ssh_keys": [], + "python": "3.7.6", + "region": "us-east-1", + "subplatform": "metadata (http://169.254.169.254)", + "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal", + "variant": "ubuntu" + }, + "variant": "ubuntu", + "vendordata": "" } diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index fd12d87b..5976e234 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -172,9 +172,7 @@ class CloudTestCase(unittest2.TestCase): 'Skipping instance-data.json test.' 
' OS: %s not bionic or newer' % self.os_name) instance_data = json.loads(out) - self.assertItemsEqual( - [], - instance_data['base64_encoded_keys']) + self.assertItemsEqual(['ci_cfg'], instance_data['sensitive_keys']) ds = instance_data.get('ds', {}) v1_data = instance_data.get('v1', {}) metadata = ds.get('meta-data', {}) @@ -201,6 +199,23 @@ class CloudTestCase(unittest2.TestCase): self.assertIn('i-', v1_data['instance_id']) self.assertIn('ip-', v1_data['local_hostname']) self.assertIsNotNone(v1_data['region'], 'expected ec2 region') + self.assertIsNotNone( + re.match(r'\d\.\d+\.\d+-\d+-aws', v1_data['kernel_release'])) + self.assertEqual( + 'redacted for non-root user', instance_data['merged_cfg']) + self.assertEqual(self.os_cfg['os'], v1_data['variant']) + self.assertEqual(self.os_cfg['os'], v1_data['distro']) + self.assertEqual( + self.os_cfg['os'], instance_data["sys_info"]['dist'][0], + "Unexpected sys_info dist value") + self.assertEqual(self.os_name, v1_data['distro_release']) + self.assertEqual( + str(self.os_cfg['version']), v1_data['distro_version']) + self.assertEqual('x86_64', v1_data['machine']) + self.assertIsNotNone( + re.match(r'3.\d\.\d', v1_data['python_version']), + "unexpected python version: {ver}".format( + ver=v1_data["python_version"])) def test_instance_data_json_lxd(self): """Validate instance-data.json content by lxd platform. @@ -237,6 +252,23 @@ class CloudTestCase(unittest2.TestCase): self.assertIsNone( v1_data['region'], 'found unexpected lxd region %s' % v1_data['region']) + self.assertIsNotNone( + re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release'])) + self.assertEqual( + 'redacted for non-root user', instance_data['merged_cfg']) + self.assertEqual(self.os_cfg['os'], v1_data['variant']) + self.assertEqual(self.os_cfg['os'], v1_data['distro']) + self.assertEqual( + self.os_cfg['os'], instance_data["sys_info"]['dist'][0], + "Unexpected sys_info dist value") + self.assertEqual(self.os_name, v1_data['distro_release']) + self.assertEqual( + str(self.os_cfg['version']), v1_data['distro_version']) + self.assertEqual('x86_64', v1_data['machine']) + self.assertIsNotNone( + re.match(r'3.\d\.\d', v1_data['python_version']), + "unexpected python version: {ver}".format( + ver=v1_data["python_version"])) def test_instance_data_json_kvm(self): """Validate instance-data.json content by nocloud-kvm platform. 
@@ -278,6 +310,23 @@ class CloudTestCase(unittest2.TestCase): self.assertIsNone( v1_data['region'], 'found unexpected lxd region %s' % v1_data['region']) + self.assertIsNotNone( + re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release'])) + self.assertEqual( + 'redacted for non-root user', instance_data['merged_cfg']) + self.assertEqual(self.os_cfg['os'], v1_data['variant']) + self.assertEqual(self.os_cfg['os'], v1_data['distro']) + self.assertEqual( + self.os_cfg['os'], instance_data["sys_info"]['dist'][0], + "Unexpected sys_info dist value") + self.assertEqual(self.os_name, v1_data['distro_release']) + self.assertEqual( + str(self.os_cfg['version']), v1_data['distro_version']) + self.assertEqual('x86_64', v1_data['machine']) + self.assertIsNotNone( + re.match(r'3.\d\.\d', v1_data['python_version']), + "unexpected python version: {ver}".format( + ver=v1_data["python_version"])) class PasswordListTest(CloudTestCase): diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index d62d542b..7aa3b1d1 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -3,6 +3,7 @@ import copy from cloudinit.cs_utils import Cepko +from cloudinit import distros from cloudinit import helpers from cloudinit import sources from cloudinit.sources import DataSourceCloudSigma @@ -47,8 +48,11 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase): self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) self.add_patch(DS_PATH + '.is_running_in_cloudsigma', "m_is_container", return_value=True) + + distro_cls = distros.fetch("ubuntu") + distro = distro_cls("ubuntu", cfg={}, paths=self.paths) self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( - "", "", paths=self.paths) + sys_cfg={}, distro=distro, paths=self.paths) self.datasource.cepko = CepkoMock(SERVER_CONTEXT) def test_get_hostname(self): -- cgit v1.2.3 From 986f37b017134ced5d9dd38b420350916297002b Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 10 Mar 2020 13:26:05 -0400 Subject: cloudinit: move to pytest for running tests (#211) As the nose docs[0] themselves note, it has been in maintenance mode for the past several years. pytest is an actively developed, featureful and popular alternative that the nose docs themselves recommend. See [1] for more details about the thinking here. (This PR also removes stale tox definitions, instead of modifying them.) 
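As an aside: pytest collects and runs plain unittest.TestCase classes unchanged,
which is why this migration only needs to touch runner invocations and tox
plumbing rather than the tests themselves. A minimal sketch (hypothetical module
name, not part of this change):

    # test_example.py
    import unittest

    class TestExample(unittest.TestCase):
        def test_addition(self):
            self.assertEqual(2 + 2, 4)

    # Either runner executes the same test:
    #   python3 -m unittest -v test_example
    #   python3 -m pytest -v test_example.py
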
[0] https://nose.readthedocs.io/en/latest/ [1] https://lists.launchpad.net/cloud-init/msg00245.html --- .travis.yml | 6 ++--- Makefile | 6 ++--- packages/pkg-deps.json | 3 +++ test-requirements.txt | 7 ++--- tools/run-container | 12 ++++----- tools/tox-venv | 2 +- tox.ini | 73 ++++++++++++++++++++------------------------------ 7 files changed, 46 insertions(+), 63 deletions(-) diff --git a/.travis.yml b/.travis.yml index d2651c0b..1cde317a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,13 +16,13 @@ matrix: - python: 3.6 env: TOXENV=py3 - NOSE_VERBOSE=2 # List all tests run by nose + PYTEST_ADDOPTS=-v # List all tests run by pytest - install: - git fetch --unshallow - sudo apt-get build-dep -y cloud-init - sudo apt-get install -y --install-recommends sbuild ubuntu-dev-tools fakeroot tox # These are build deps but not pulled in by the build-dep call above - - sudo apt-get install -y --install-recommends dh-systemd python3-coverage python3-contextlib2 + - sudo apt-get install -y --install-recommends dh-systemd python3-coverage python3-contextlib2 python3-pytest python3-pytest-cov - pip install . - pip install tox # bionic has lxd from deb installed, remove it first to ensure @@ -46,7 +46,7 @@ matrix: - python: 3.5 env: TOXENV=xenial - NOSE_VERBOSE=2 # List all tests run by nose + PYTEST_ADDOPTS=-v # List all tests run by pytest # Travis doesn't support Python 3.4 on bionic, so use xenial dist: xenial - python: 3.6 diff --git a/Makefile b/Makefile index 315e6b45..5f575633 100644 --- a/Makefile +++ b/Makefile @@ -2,8 +2,6 @@ CWD=$(shell pwd) PYVER ?= $(shell for p in python3 python2; do \ out=$$(command -v $$p 2>&1) && echo $$p && exit; done; exit 1) -noseopts ?= -v - YAML_FILES=$(shell find cloudinit tests tools -name "*.yaml" -type f ) YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) @@ -48,10 +46,10 @@ pyflakes3: @$(CWD)/tools/run-pyflakes3 unittest: clean_pyc - nosetests $(noseopts) tests/unittests cloudinit + python -m pytest -v tests/unittests cloudinit unittest3: clean_pyc - nosetests3 $(noseopts) tests/unittests cloudinit + python3 -m pytest -v tests/unittests cloudinit ci-deps-ubuntu: @$(PYVER) $(CWD)/tools/read-dependencies --distro ubuntu --test-distro diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json index cf065219..64d6d78d 100644 --- a/packages/pkg-deps.json +++ b/packages/pkg-deps.json @@ -45,6 +45,9 @@ "pyserial" : { "2" : "pyserial" }, + "pytest": { + "3": "python36-pytest" + }, "requests" : { "3" : "python36-requests" }, diff --git a/test-requirements.txt b/test-requirements.txt index 6fb22b24..057115b6 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,11 +1,8 @@ # Needed generally in tests httpretty>=0.7.1 -nose +pytest unittest2 -coverage - -# Only needed if you want to know the test times -# nose-timer +pytest-cov # Only really needed on older versions of python contextlib2 diff --git a/tools/run-container b/tools/run-container index 23243474..a7a552ec 100755 --- a/tools/run-container +++ b/tools/run-container @@ -287,8 +287,8 @@ prep() { install_packages "$@" } -nose() { - python3 -m nose "$@" +pytest() { + python3 -m pytest "$@" } is_done_cloudinit() { @@ -478,10 +478,10 @@ main() { if [ -n "$unittest" ]; then debug 1 "running unit tests." 
- run_self_inside_as_cd "$name" "$user" "$cdir" nose \ + run_self_inside_as_cd "$name" "$user" "$cdir" pytest \ tests/unittests cloudinit/ || { - errorrc "nosetests failed."; - errors[${#errors[@]}]="nosetests" + errorrc "pytest failed."; + errors[${#errors[@]}]="pytest" } fi @@ -557,7 +557,7 @@ main() { } case "${1:-}" in - prep|os_info|wait_inside|nose) _n=$1; shift; "$_n" "$@";; + prep|os_info|wait_inside|pytest) _n=$1; shift; "$_n" "$@";; *) main "$@";; esac diff --git a/tools/tox-venv b/tools/tox-venv index a5d21625..9dd02460 100755 --- a/tools/tox-venv +++ b/tools/tox-venv @@ -116,7 +116,7 @@ Usage: ${0##*/} [--no-create] tox-environment [command [args]] be read from tox.ini. This allows you to do: tox-venv py27 - tests/some/sub/dir and have the 'command' read correctly and have that execute: - python -m nose tests/some/sub/dir + python -m pytest tests/some/sub/dir EOF if [ -f "$tox_ini" ]; then diff --git a/tox.ini b/tox.ini index 8612f034..8c994a05 100644 --- a/tox.ini +++ b/tox.ini @@ -1,13 +1,13 @@ [tox] -envlist = py3, xenial, pycodestyle, pyflakes, pylint +envlist = py3, xenial-dev, pycodestyle, pyflakes, pylint recreate = True [testenv] -commands = python -m nose {posargs:tests/unittests cloudinit} +commands = {envpython} -m pytest {posargs:tests/unittests cloudinit} setenv = LC_ALL = en_US.utf-8 passenv= - NOSE_VERBOSE + PYTEST_ADDOPTS [testenv:pycodestyle] basepython = python3 @@ -32,23 +32,16 @@ commands = {envpython} -m pylint {posargs:cloudinit tests tools} [testenv:py3] basepython = python3 deps = - nose-timer -r{toxinidir}/test-requirements.txt -commands = {envpython} -m nose --with-timer --timer-top-n 10 \ - {posargs:--with-coverage --cover-erase --cover-branches \ - --cover-inclusive --cover-package=cloudinit \ +commands = {envpython} -m pytest \ + --durations 10 \ + {posargs:--cov=cloudinit --cov-branch \ tests/unittests cloudinit} [testenv:py27] basepython = python2.7 deps = -r{toxinidir}/test-requirements.txt -[testenv:py26] -deps = -r{toxinidir}/test-requirements.txt -commands = nosetests {posargs:tests/unittests cloudinit} -setenv = - LC_ALL = C - [flake8] #H102 Apache 2.0 license header not found ignore=H404,H405,H105,H301,H104,H403,H101,H102,H106,H304 @@ -62,11 +55,15 @@ commands = {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html} doc8 doc/rtd -[testenv:xenial] -commands = - python ./tools/pipremove jsonschema - python -m nose {posargs:tests/unittests cloudinit} -basepython = python3 +[xenial-shared-deps] +# The version of pytest in xenial doesn't work with Python 3.8, so we define +# two xenial environments: [testenv:xenial] runs the tests with exactly the +# version of pytest present in xenial, and is used in CI. [testenv:xenial-dev] +# runs the tests with the lowest version of pytest that works with Python 3.8, +# 3.0.7, but keeps the other dependencies at xenial's level. +# +# (This section is not a testenv, it is used to maintain a single definition of +# the dependencies shared between the two xenial testenvs.) 
deps = # requirements jinja2==2.8 @@ -83,38 +80,26 @@ deps = # test-requirements httpretty==0.9.6 mock==1.3.0 - nose==1.3.7 unittest2==1.1.0 contextlib2==0.5.1 -[testenv:centos6] -basepython = python2.6 -commands = nosetests {posargs:tests/unittests cloudinit} +[testenv:xenial] +commands = + python ./tools/pipremove jsonschema + python -m pytest {posargs:tests/unittests cloudinit} +basepython = python3 deps = - # requirements - argparse==1.2.1 - jinja2==2.2.1 - pyyaml==3.10 - oauthlib==0.6.0 - configobj==4.6.0 - requests==2.6.0 - jsonpatch==1.2 - six==1.9.0 - -r{toxinidir}/test-requirements.txt + # Refer to the comment in [xenial-shared-deps] for details + {[xenial-shared-deps]deps} + pytest==2.8.7 -[testenv:opensusel150] -basepython = python2.7 -commands = nosetests {posargs:tests/unittests cloudinit} +[testenv:xenial-dev] +commands = {[testenv:xenial]commands} +basepython = {[testenv:xenial]basepython} deps = - # requirements - jinja2==2.10 - PyYAML==3.12 - oauthlib==2.0.6 - configobj==5.0.6 - requests==2.18.4 - jsonpatch==1.16 - six==1.11.0 - -r{toxinidir}/test-requirements.txt + # Refer to the comment in [xenial-shared-deps] for details + {[xenial-shared-deps]deps} + pytest==3.0.7 [testenv:tip-pycodestyle] commands = {envpython} -m pycodestyle {posargs:cloudinit/ tests/ tools/} -- cgit v1.2.3 From 64c33704853087c05c54e157fc42a340a4350159 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 10 Mar 2020 14:40:27 -0400 Subject: tox.ini: bump pyflakes version to 2.1.1 (#239) pyflakes versions older than 2.1.0 are incompatible with Python 3.8 (which is the Python version in the current Ubuntu development release). See https://github.com/PyCQA/pyflakes/issues/367 for details. 2.1.1 is the latest version ATM, so bump to that. --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 8c994a05..0f9db826 100644 --- a/tox.ini +++ b/tox.ini @@ -108,7 +108,7 @@ deps = pycodestyle [testenv:pyflakes] commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} deps = - pyflakes==1.6.0 + pyflakes==2.1.1 [testenv:tip-pyflakes] commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} -- cgit v1.2.3 From aea652990cc3d39e5b5e5dbddd1d8190a4621563 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 10 Mar 2020 14:10:07 -0600 Subject: workflows: CLA validation altered to fail status on pull_request (#164) Github api doesn't allow read-write access to labels or comments when running from a pull_request fork during CI. This restriction results in an API error message: "Resource not accessible by integration" If we want to run this action per pull_request, we need to convert the action to fail the PR status check and emit the required steps to sign the CLA to the console on the PR's failed status tab. 
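The mechanics below boil down to: print the sign-the-CLA instructions to the job
log and exit non-zero, so the pull_request status check fails on its own, instead
of calling the label/comment APIs that are read-only for forked PRs. A rough
Python sketch of the same check (a hypothetical standalone script; the real
change is the shell step in cla.yml below, and the mapping-file format is assumed
from the grep pattern used there):

    import sys

    def cla_signed(username, mapping="tools/.lp-to-git-user"):
        # Entries in the mapping are assumed to look like:  "lpuser": "githubuser"
        with open(mapping) as f:
            return ': "{}"'.format(username) in f.read()

    if __name__ == "__main__":
        user = sys.argv[1]
        if cla_signed(user):
            print("Thanks {} for signing cloud-init's CLA".format(user))
        else:
            print("Please sign the CLA; see "
                  "https://cloudinit.readthedocs.io/en/latest/topics/hacking.html")
            sys.exit(1)  # non-zero exit marks the PR status check as failed
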
--- .github/workflows/cla.yml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 34e11c2d..757e8ff1 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -8,22 +8,22 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 - - run: | - echo "::set-env name=CLA_SIGNED::$(grep -q ': \"${{ github.actor }}\"' ./tools/.lp-to-git-user && echo CLA signed || echo CLA not signed)" - - name: Add CLA label + - name: Check CLA signing status for ${{ github.actor}} run: | - # POST a new label to this issue - curl --request POST \ - --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/labels \ - --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ - --header 'content-type: application/json' \ - --data '{"labels": ["${{env.CLA_SIGNED}}"]}' - - name: Comment about CLA signing - if: env.CLA_SIGNED == 'CLA not signed' - run: | - # POST a comment directing submitter to sign the CLA - curl --request POST \ - --url https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.number }}/comments \ - --header 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ - --header 'content-type: application/json' \ - --data '{"body": "Hello ${{ github.actor }},\n\nThank you for your contribution to cloud-init.\n\nIn order for us to merge this pull request, you need\nto have signed the Contributor License Agreement (CLA).\nPlease ensure that you have signed the CLA by following our\nhacking guide at:\n\nhttps://cloudinit.readthedocs.io/en/latest/topics/hacking.html\n\nThanks,\nYour friendly cloud-init upstream\n"}' + cat > unsigned-cla.txt < Date: Tue, 10 Mar 2020 22:49:52 -0400 Subject: tox.ini: use xenial version of jsonpatch in CI (#242) Now that we can distinguish between CI xenial dependencies and needed-to-run-on-dev-machine xenial depedencies, we can return to testing with the correct jsonpatch version. --- tox.ini | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index 0f9db826..d17f493d 100644 --- a/tox.ini +++ b/tox.ini @@ -72,10 +72,6 @@ deps = pyserial==3.0.1 configobj==5.0.6 requests==2.9.1 - # jsonpatch in xenial is 1.10, not 1.19 (#839779). The oldest version - # to work with python3.6 is 1.16 as found in Artful. To keep default - # invocation of 'tox' happy, accept the difference in version here. - jsonpatch==1.16 six==1.10.0 # test-requirements httpretty==0.9.6 @@ -91,6 +87,7 @@ basepython = python3 deps = # Refer to the comment in [xenial-shared-deps] for details {[xenial-shared-deps]deps} + jsonpatch==1.10 pytest==2.8.7 [testenv:xenial-dev] @@ -99,6 +96,10 @@ basepython = {[testenv:xenial]basepython} deps = # Refer to the comment in [xenial-shared-deps] for details {[xenial-shared-deps]deps} + # jsonpatch in xenial is 1.10, not 1.19 (#839779). The oldest version + # to work with python3.6 is 1.16 as found in Artful. To keep default + # invocation of 'tox' happy, accept the difference in version here. 
+ jsonpatch==1.16 pytest==3.0.7 [testenv:tip-pycodestyle] -- cgit v1.2.3 From a4d64e21f376be42b78d4a45d804f72dbba30fb0 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 10 Mar 2020 23:11:42 -0400 Subject: workflows/cla.yml: use correct username for CLA check (#243) Instead of using the username that triggered the action (which, in the case of a committer merging master into a PR branch will be the committer), always use the username of the submitter of the pull request. --- .github/workflows/cla.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 757e8ff1..389eccb8 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -8,10 +8,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v1 - - name: Check CLA signing status for ${{ github.actor}} + - name: Check CLA signing status for ${{ github.event.pull_request.user.login }} run: | cat > unsigned-cla.txt < Date: Wed, 11 Mar 2020 10:33:57 -0400 Subject: Introduce and use of a list of GitHub usernames that have signed CLA (#244) The list so far is partial. --- .github/workflows/cla.yml | 17 ++++++++++++++--- tools/.github-cla-signers | 1 + 2 files changed, 15 insertions(+), 3 deletions(-) create mode 100644 tools/.github-cla-signers diff --git a/.github/workflows/cla.yml b/.github/workflows/cla.yml index 389eccb8..8a0b2c07 100644 --- a/.github/workflows/cla.yml +++ b/.github/workflows/cla.yml @@ -24,6 +24,17 @@ jobs: Thanks, Your friendly cloud-init upstream EOF - grep -q ': \"${{ github.event.pull_request.user.login }}\"' ./tools/.lp-to-git-user && \ - echo "Thanks ${{ github.event.pull_request.user.login }} for signing cloud-init's CLA" || \ - (cat unsigned-cla.txt && exit 1) + + has_signed() { + username="$1" + grep -q ": \"$username\"" ./tools/.lp-to-git-user && return 0 + grep -q "^$username$" ./tools/.github-cla-signers && return 0 + return 1 + } + + if has_signed "${{ github.event.pull_request.user.login }}"; then + echo "Thanks ${{ github.event.pull_request.user.login }} for signing cloud-init's CLA" + else + cat unsigned-cla.txt + exit 1 + fi diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers new file mode 100644 index 00000000..48ac33e4 --- /dev/null +++ b/tools/.github-cla-signers @@ -0,0 +1 @@ +dhensby -- cgit v1.2.3 From d5dbbd1c54844a41067c673e915ada629fb4cd8f Mon Sep 17 00:00:00 2001 From: Daniel Hensby Date: Wed, 11 Mar 2020 14:55:30 +0000 Subject: Add pub_key_ed25519 to cc_phone_home (#237) --- cloudinit/config/cc_phone_home.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index b8e27090..733c3910 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -19,6 +19,7 @@ keys to post. 
Available keys are: - ``pub_key_dsa`` - ``pub_key_rsa`` - ``pub_key_ecdsa`` + - ``pub_key_ed25519`` - ``instance_id`` - ``hostname`` - ``fdqn`` @@ -52,6 +53,7 @@ POST_LIST_ALL = [ 'pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', + 'pub_key_ed25519', 'instance_id', 'hostname', 'fqdn' @@ -105,6 +107,7 @@ def handle(name, cfg, cloud, log, args): 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub', 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub', 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub', + 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub', } for (n, path) in pubkeys.items(): -- cgit v1.2.3 From 65a1b907c336786bce3917fad3f87c67f0caa7bf Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 11 Mar 2020 14:27:18 -0400 Subject: tox.ini: avoid substition syntax that causes a traceback on xenial (#245) See the added comment for details. --- tox.ini | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index d17f493d..7591d9f3 100644 --- a/tox.ini +++ b/tox.ini @@ -80,6 +80,8 @@ deps = contextlib2==0.5.1 [testenv:xenial] +# When updating this commands definition, also update the definition in +# [testenv:xenial-dev]. See the comment there for details. commands = python ./tools/pipremove jsonschema python -m pytest {posargs:tests/unittests cloudinit} @@ -91,7 +93,15 @@ deps = pytest==2.8.7 [testenv:xenial-dev] -commands = {[testenv:xenial]commands} +# This should be: +# commands = {[testenv:xenial]commands} +# but the version of pytest in xenial has a bug +# (https://github.com/tox-dev/tox/issues/208) which means that the {posargs} +# substitution variable is misparsed and causes a traceback. Ensure that any +# changes here are reflected in [testenv:xenial]. +commands = + python ./tools/pipremove jsonschema + python -m pytest {posargs:tests/unittests cloudinit} basepython = {[testenv:xenial]basepython} deps = # Refer to the comment in [xenial-shared-deps] for details -- cgit v1.2.3 From 94838def772349387e16cc642b3642020e22deda Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Thu, 12 Mar 2020 14:37:08 -0400 Subject: Add Netbsd support (#62) Add support for the NetBSD Operating System. Features in this branch: * Add BSD distro parent class from which NetBSD and FreeBSD can specialize * Add *bsd util functions to cloudinit.net and cloudinit.net.bsd_utils * subclass cloudinit.distro.freebsd.Distro from bsd.Distro * Add new cloudinit.distro.netbsd and cloudinit.net.renderer for netbsd * Add lru_cached util.is_NetBSD functions * Add NetBSD detection for ConfigDrive and NoCloud datasources This branch has been tested with: - NoCloud and OpenStack (with and without config-drive) - NetBSD 8.1. and 9.0 - FreeBSD 11.2 and 12.1 - Python 3.7 only, because of the dependency oncrypt.METHOD_BLOWFISH. 
This version is available in NetBSD 7, 8 and 9 anyway --- cloudinit/distros/bsd.py | 111 ++++++++++++++++ cloudinit/distros/bsd_utils.py | 50 ++++++++ cloudinit/distros/freebsd.py | 133 +++---------------- cloudinit/distros/netbsd.py | 133 +++++++++++++++++++ cloudinit/net/__init__.py | 27 ++++ cloudinit/net/bsd.py | 165 ++++++++++++++++++++++++ cloudinit/net/freebsd.py | 169 ++++--------------------- cloudinit/net/netbsd.py | 42 ++++++ cloudinit/net/renderers.py | 4 +- cloudinit/netinfo.py | 52 +++++++- cloudinit/sources/DataSourceConfigDrive.py | 8 +- cloudinit/sources/DataSourceNoCloud.py | 8 ++ cloudinit/tests/helpers.py | 1 + cloudinit/tests/test_util.py | 34 +++-- cloudinit/util.py | 33 ++++- config/cloud.cfg.tmpl | 30 ++++- doc/rtd/topics/network-config.rst | 2 +- setup.py | 4 +- sysvinit/netbsd/cloudconfig | 17 +++ sysvinit/netbsd/cloudfinal | 16 +++ sysvinit/netbsd/cloudinit | 16 +++ sysvinit/netbsd/cloudinitlocal | 18 +++ tests/unittests/test_distros/test_bsd_utils.py | 66 ++++++++++ tools/build-on-netbsd | 37 ++++++ tools/render-cloudcfg | 4 +- 25 files changed, 884 insertions(+), 296 deletions(-) create mode 100644 cloudinit/distros/bsd.py create mode 100644 cloudinit/distros/bsd_utils.py create mode 100644 cloudinit/distros/netbsd.py create mode 100644 cloudinit/net/bsd.py create mode 100644 cloudinit/net/netbsd.py create mode 100755 sysvinit/netbsd/cloudconfig create mode 100755 sysvinit/netbsd/cloudfinal create mode 100755 sysvinit/netbsd/cloudinit create mode 100755 sysvinit/netbsd/cloudinitlocal create mode 100644 tests/unittests/test_distros/test_bsd_utils.py create mode 100755 tools/build-on-netbsd diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py new file mode 100644 index 00000000..e9b84edc --- /dev/null +++ b/cloudinit/distros/bsd.py @@ -0,0 +1,111 @@ +import platform + +from cloudinit import distros +from cloudinit.distros import bsd_utils +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import net +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +class BSD(distros.Distro): + hostname_conf_fn = '/etc/rc.conf' + rc_conf_fn = "/etc/rc.conf" + + # Set in BSD distro subclasses + group_add_cmd_prefix = [] + pkg_cmd_install_prefix = [] + pkg_cmd_remove_prefix = [] + + def __init__(self, name, cfg, paths): + super().__init__(name, cfg, paths) + # This will be used to restrict certain + # calls from repeatly happening (when they + # should only happen say once per instance...) 
+ self._runner = helpers.Runners(paths) + cfg['ssh_svcname'] = 'sshd' + self.osfamily = platform.system().lower() + + def _read_system_hostname(self): + sys_hostname = self._read_hostname(self.hostname_conf_fn) + return (self.hostname_conf_fn, sys_hostname) + + def _read_hostname(self, filename, default=None): + return bsd_utils.get_rc_config_value('hostname') + + def _get_add_member_to_group_cmd(self, member_name, group_name): + raise NotImplementedError('Return list cmd to add member to group') + + def _write_hostname(self, hostname, filename): + bsd_utils.set_rc_config_value('hostname', hostname, fn='/etc/rc.conf') + + def create_group(self, name, members=None): + if util.is_group(name): + LOG.warning("Skipping creation of existing group '%s'", name) + else: + group_add_cmd = self.group_add_cmd_prefix + [name] + try: + util.subp(group_add_cmd) + LOG.info("Created new group %s", name) + except Exception: + util.logexc(LOG, "Failed to create group %s", name) + + if not members: + members = [] + for member in members: + if not util.is_user(member): + LOG.warning("Unable to add group member '%s' to group '%s'" + "; user does not exist.", member, name) + continue + try: + util.subp(self._get_add_member_to_group_cmd(member, name)) + LOG.info("Added user '%s' to group '%s'", member, name) + except Exception: + util.logexc(LOG, "Failed to add user '%s' to group '%s'", + member, name) + + def generate_fallback_config(self): + nconf = {'config': [], 'version': 1} + for mac, name in net.get_interfaces_by_mac().items(): + nconf['config'].append( + {'type': 'physical', 'name': name, + 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]}) + return nconf + + def install_packages(self, pkglist): + self.update_package_sources() + self.package_command('install', pkgs=pkglist) + + def _get_pkg_cmd_environ(self): + """Return environment vars used in *BSD package_command operations""" + raise NotImplementedError('BSD subclasses return a dict of env vars') + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + # TODO neither freebsd nor netbsd handles a command 'upgrade' + # provided by cloudinit/config/cc_package_update_upgrade_install.py + if command == 'install': + cmd = self.pkg_cmd_install_prefix + elif command == 'remove': + cmd = self.pkg_cmd_remove_prefix + + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + pkglist = util.expand_package_list('%s-%s', pkgs) + cmd.extend(pkglist) + + # Allow the output of this to flow outwards (ie not be captured) + util.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False) + + def _write_network_config(self, netconfig): + return self._supported_write_network_config(netconfig) + + def set_timezone(self, tz): + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) diff --git a/cloudinit/distros/bsd_utils.py b/cloudinit/distros/bsd_utils.py new file mode 100644 index 00000000..079d0d53 --- /dev/null +++ b/cloudinit/distros/bsd_utils.py @@ -0,0 +1,50 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import shlex + +from cloudinit import util + +# On NetBSD, /etc/rc.conf comes with a if block: +# if [ -r /etc/defaults/rc.conf ]; then +# as a consequence, the file is not a regular key/value list +# anymore and we cannot use cloudinit.distros.parsers.sys_conf +# The module comes with a more naive parser, but is able to +# preserve these if blocks. 
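(Aside: in practice the helpers in this new module behave like a line-oriented
get/set for rc.conf, leaving unrelated lines, including the if block mentioned
above, untouched. A small usage sketch, assuming a scratch copy of rc.conf
rather than the live /etc/rc.conf:

    from cloudinit.distros import bsd_utils

    # Read a value; surrounding quotes are stripped, e.g. hostname="foo" -> foo
    hostname = bsd_utils.get_rc_config_value('hostname', fn='/tmp/rc.conf')

    # Update (or append) a key; the file is only rewritten when content changes
    bsd_utils.set_rc_config_value('hostname', 'myhost', fn='/tmp/rc.conf')
)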
+ + +def _unquote(value): + if value[0] == value[-1] and value[0] in ['"', "'"]: + return value[1:-1] + return value + + +def get_rc_config_value(key, fn='/etc/rc.conf'): + key_prefix = '{}='.format(key) + for line in util.load_file(fn).splitlines(): + if line.startswith(key_prefix): + value = line.replace(key_prefix, '') + return _unquote(value) + + +def set_rc_config_value(key, value, fn='/etc/rc.conf'): + lines = [] + done = False + value = shlex.quote(value) + original_content = util.load_file(fn) + for line in original_content.splitlines(): + if '=' in line: + k, v = line.split('=', 1) + if k == key: + v = value + done = True + lines.append('='.join([k, v])) + else: + lines.append(line) + if not done: + lines.append('='.join([key, value])) + new_content = '\n'.join(lines) + '\n' + if new_content != original_content: + util.write_file(fn, new_content) + + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 026d1142..a775ae51 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -8,34 +8,22 @@ import os import re from io import StringIO -from cloudinit import distros -from cloudinit import helpers +import cloudinit.distros.bsd from cloudinit import log as logging -from cloudinit import net -from cloudinit import ssh_util from cloudinit import util -from cloudinit.distros import rhel_util from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) -class Distro(distros.Distro): +class Distro(cloudinit.distros.bsd.BSD): usr_lib_exec = '/usr/local/lib' - rc_conf_fn = "/etc/rc.conf" login_conf_fn = '/etc/login.conf' login_conf_fn_bak = '/etc/login.conf.orig' ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users' - hostname_conf_fn = '/etc/rc.conf' - - def __init__(self, name, cfg, paths): - distros.Distro.__init__(self, name, cfg, paths) - # This will be used to restrict certain - # calls from repeatly happening (when they - # should only happen say once per instance...) - self._runner = helpers.Runners(paths) - self.osfamily = 'freebsd' - cfg['ssh_svcname'] = 'sshd' + group_add_cmd_prefix = ['pw', 'group', 'add'] + pkg_cmd_install_prefix = ["pkg", "install"] + pkg_cmd_remove_prefix = ["pkg", "remove"] def _select_hostname(self, hostname, fqdn): # Should be FQDN if available. 
See rc.conf(5) in FreeBSD @@ -43,45 +31,8 @@ class Distro(distros.Distro): return fqdn return hostname - def _read_system_hostname(self): - sys_hostname = self._read_hostname(self.hostname_conf_fn) - return (self.hostname_conf_fn, sys_hostname) - - def _read_hostname(self, filename, default=None): - (_exists, contents) = rhel_util.read_sysconfig_file(filename) - if contents.get('hostname'): - return contents['hostname'] - else: - return default - - def _write_hostname(self, hostname, filename): - rhel_util.update_sysconfig_file(filename, {'hostname': hostname}) - - def create_group(self, name, members): - group_add_cmd = ['pw', 'group', 'add', name] - if util.is_group(name): - LOG.warning("Skipping creation of existing group '%s'", name) - else: - try: - util.subp(group_add_cmd) - LOG.info("Created new group %s", name) - except Exception: - util.logexc(LOG, "Failed to create group %s", name) - raise - if not members: - members = [] - - for member in members: - if not util.is_user(member): - LOG.warning("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) - continue - try: - util.subp(['pw', 'usermod', '-n', name, '-G', member]) - LOG.info("Added user '%s' to group '%s'", member, name) - except Exception: - util.logexc(LOG, "Failed to add user '%s' to group '%s'", - member, name) + def _get_add_member_to_group_cmd(self, member_name, group_name): + return ['pw', 'usermod', '-n', member_name, '-G', group_name] def add_user(self, name, **kwargs): if util.is_user(name): @@ -162,40 +113,8 @@ class Distro(distros.Distro): util.logexc(LOG, "Failed to lock user %s", name) raise - def create_user(self, name, **kwargs): - self.add_user(name, **kwargs) - - # Set password if plain-text password provided and non-empty - if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']: - self.set_passwd(name, kwargs['plain_text_passwd']) - - # Default locking down the account. 'lock_passwd' defaults to True. - # lock account unless lock_password is False. - if kwargs.get('lock_passwd', True): - self.lock_passwd(name) - - # Configure sudo access - if 'sudo' in kwargs and kwargs['sudo'] is not False: - self.write_sudo_rules(name, kwargs['sudo']) - - # Import SSH keys - if 'ssh_authorized_keys' in kwargs: - keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, options=None) - - def generate_fallback_config(self): - nconf = {'config': [], 'version': 1} - for mac, name in net.get_interfaces_by_mac().items(): - nconf['config'].append( - {'type': 'physical', 'name': name, - 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]}) - return nconf - - def _write_network_config(self, netconfig): - return self._supported_write_network_config(netconfig) - def apply_locale(self, locale, out_fn=None): - # Adjust the locals value to the new value + # Adjust the locales value to the new value newconf = StringIO() for line in util.load_file(self.login_conf_fn).splitlines(): newconf.write(re.sub(r'^default:', @@ -225,39 +144,17 @@ class Distro(distros.Distro): # /etc/rc.conf a line with the following format: # ifconfig_OLDNAME_name=NEWNAME # FreeBSD network script will rename the interface automatically. 
- return - - def install_packages(self, pkglist): - self.update_package_sources() - self.package_command('install', pkgs=pkglist) - - def package_command(self, command, args=None, pkgs=None): - if pkgs is None: - pkgs = [] + pass + def _get_pkg_cmd_environ(self): + """Return environment vars used in *BSD package_command operations""" e = os.environ.copy() e['ASSUME_ALWAYS_YES'] = 'YES' - - cmd = ['pkg'] - if args and isinstance(args, str): - cmd.append(args) - elif args and isinstance(args, list): - cmd.extend(args) - - if command: - cmd.append(command) - - pkglist = util.expand_package_list('%s-%s', pkgs) - cmd.extend(pkglist) - - # Allow the output of this to flow outwards (ie not be captured) - util.subp(cmd, env=e, capture=False) - - def set_timezone(self, tz): - distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) + return e def update_package_sources(self): - self._runner.run("update-sources", self.package_command, - ["update"], freq=PER_INSTANCE) + self._runner.run( + "update-sources", self.package_command, + ["update"], freq=PER_INSTANCE) # vi: ts=4 expandtab diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py new file mode 100644 index 00000000..353eb671 --- /dev/null +++ b/cloudinit/distros/netbsd.py @@ -0,0 +1,133 @@ +# Copyright (C) 2019-2020 Gonéri Le Bouder +# +# This file is part of cloud-init. See LICENSE file for license information. + +import crypt +import os +import platform +import six + +import cloudinit.distros.bsd +from cloudinit import log as logging +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +class Distro(cloudinit.distros.bsd.BSD): + ci_sudoers_fn = '/usr/pkg/etc/sudoers.d/90-cloud-init-users' + + group_add_cmd_prefix = ["groupadd"] + pkg_cmd_install_prefix = ["pkg_add", "-U"] + pkg_cmd_remove_prefix = ['pkg_delete'] + + def _get_add_member_to_group_cmd(self, member_name, group_name): + return ['usermod', '-G', group_name, member_name] + + def add_user(self, name, **kwargs): + if util.is_user(name): + LOG.info("User %s already exists, skipping.", name) + return False + + adduser_cmd = ['useradd'] + log_adduser_cmd = ['useradd'] + + adduser_opts = { + "homedir": '-d', + "gecos": '-c', + "primary_group": '-g', + "groups": '-G', + "shell": '-s', + } + adduser_flags = { + "no_user_group": '--no-user-group', + "system": '--system', + "no_log_init": '--no-log-init', + } + + for key, val in kwargs.items(): + if (key in adduser_opts and val and + isinstance(val, six.string_types)): + adduser_cmd.extend([adduser_opts[key], val]) + + elif key in adduser_flags and val: + adduser_cmd.append(adduser_flags[key]) + log_adduser_cmd.append(adduser_flags[key]) + + if 'no_create_home' not in kwargs or 'system' not in kwargs: + adduser_cmd += ['-m'] + log_adduser_cmd += ['-m'] + + adduser_cmd += [name] + log_adduser_cmd += [name] + + # Run the command + LOG.info("Adding user %s", name) + try: + util.subp(adduser_cmd, logstring=log_adduser_cmd) + except Exception: + util.logexc(LOG, "Failed to create user %s", name) + raise + # Set the password if it is provided + # For security consideration, only hashed passwd is assumed + passwd_val = kwargs.get('passwd', None) + if passwd_val is not None: + self.set_passwd(name, passwd_val, hashed=True) + + def set_passwd(self, user, passwd, hashed=False): + if hashed: + hashed_pw = passwd + elif not hasattr(crypt, 'METHOD_BLOWFISH'): + # crypt.METHOD_BLOWFISH comes with Python 3.7 which is available + # on NetBSD 7 and 8. + LOG.error(( + 'Cannot set non-encrypted password for user %s. 
' + 'Python >= 3.7 is required.'), user) + return + else: + method = crypt.METHOD_BLOWFISH # pylint: disable=E1101 + hashed_pw = crypt.crypt( + passwd, + crypt.mksalt(method)) + + try: + util.subp(['usermod', '-C', 'no', '-p', hashed_pw, user]) + except Exception: + util.logexc(LOG, "Failed to set password for %s", user) + raise + + def force_passwd_change(self, user): + try: + util.subp(['usermod', '-F', user]) + except Exception: + util.logexc(LOG, "Failed to set pw expiration for %s", user) + raise + + def lock_passwd(self, name): + try: + util.subp(['usermod', '-C', 'yes', name]) + except Exception: + util.logexc(LOG, "Failed to lock user %s", name) + raise + + def apply_locale(self, locale, out_fn=None): + LOG.debug('Cannot set the locale.') + + def apply_network_config_names(self, netconfig): + LOG.debug('NetBSD cannot rename network interface.') + + def _get_pkg_cmd_environ(self): + """Return environment vars used in *BSD package_command operations""" + os_release = platform.release() + os_arch = platform.machine() + e = os.environ.copy() + e['PKG_PATH'] = ( + 'http://cdn.netbsd.org/pub/pkgsrc/' + 'packages/NetBSD/%s/%s/All') % (os_arch, os_release) + return e + + def update_package_sources(self): + pass + + +# vi: ts=4 expandtab diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 1d5eb535..400d7870 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -334,10 +334,20 @@ def find_fallback_nic(blacklist_drivers=None): """Return the name of the 'fallback' network device.""" if util.is_FreeBSD(): return find_fallback_nic_on_freebsd(blacklist_drivers) + elif util.is_NetBSD(): + return find_fallback_nic_on_netbsd(blacklist_drivers) else: return find_fallback_nic_on_linux(blacklist_drivers) +def find_fallback_nic_on_netbsd(blacklist_drivers=None): + values = list(sorted( + get_interfaces_by_mac().values(), + key=natural_sort_key)) + if values: + return values[0] + + def find_fallback_nic_on_freebsd(blacklist_drivers=None): """Return the name of the 'fallback' network device on FreeBSD. @@ -799,6 +809,8 @@ def get_ib_interface_hwaddr(ifname, ethernet_format): def get_interfaces_by_mac(): if util.is_FreeBSD(): return get_interfaces_by_mac_on_freebsd() + elif util.is_NetBSD(): + return get_interfaces_by_mac_on_netbsd() else: return get_interfaces_by_mac_on_linux() @@ -830,6 +842,21 @@ def get_interfaces_by_mac_on_freebsd(): return results +def get_interfaces_by_mac_on_netbsd(): + ret = {} + re_field_match = ( + r"(?P\w+).*address:\s" + r"(?P([\da-f]{2}[:-]){5}([\da-f]{2})).*") + (out, _) = util.subp(['ifconfig', '-a']) + if_lines = re.sub(r'\n\s+', ' ', out).splitlines() + for line in if_lines: + m = re.match(re_field_match, line) + if m: + fields = m.groupdict() + ret[fields['mac']] = fields['ifname'] + return ret + + def get_interfaces_by_mac_on_linux(): """Build a dictionary of tuples {mac: name}. diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py new file mode 100644 index 00000000..fb714d4c --- /dev/null +++ b/cloudinit/net/bsd.py @@ -0,0 +1,165 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import re + +from cloudinit import log as logging +from cloudinit import net +from cloudinit import util +from cloudinit.distros.parsers.resolv_conf import ResolvConf +from cloudinit.distros import bsd_utils + +from . 
import renderer + +LOG = logging.getLogger(__name__) + + +class BSDRenderer(renderer.Renderer): + resolv_conf_fn = 'etc/resolv.conf' + rc_conf_fn = 'etc/rc.conf' + + def get_rc_config_value(self, key): + fn = util.target_path(self.target, self.rc_conf_fn) + bsd_utils.get_rc_config_value(key, fn=fn) + + def set_rc_config_value(self, key, value): + fn = util.target_path(self.target, self.rc_conf_fn) + bsd_utils.set_rc_config_value(key, value, fn=fn) + + def __init__(self, config=None): + if not config: + config = {} + self.target = None + self.interface_configurations = {} + self._postcmds = config.get('postcmds', True) + + def _ifconfig_entries(self, settings, target=None): + ifname_by_mac = net.get_interfaces_by_mac() + for interface in settings.iter_interfaces(): + device_name = interface.get("name") + device_mac = interface.get("mac_address") + if device_name and re.match(r'^lo\d+$', device_name): + continue + if device_mac not in ifname_by_mac: + LOG.info('Cannot find any device with MAC %s', device_mac) + elif device_mac and device_name: + cur_name = ifname_by_mac[device_mac] + if cur_name != device_name: + LOG.info('netif service will rename interface %s to %s', + cur_name, device_name) + try: + self.rename_interface(cur_name, device_name) + except NotImplementedError: + LOG.error(( + 'Interface renaming is ' + 'not supported on this OS')) + device_name = cur_name + + else: + device_name = ifname_by_mac[device_mac] + + LOG.info('Configuring interface %s', device_name) + + self.interface_configurations[device_name] = 'DHCP' + + for subnet in interface.get("subnets", []): + if subnet.get('type') == 'static': + if not subnet.get('netmask'): + LOG.debug( + 'Skipping IP %s, because there is no netmask', + subnet.get('address')) + continue + LOG.debug('Configuring dev %s with %s / %s', device_name, + subnet.get('address'), subnet.get('netmask')) + + self.interface_configurations[device_name] = { + 'address': subnet.get('address'), + 'netmask': subnet.get('netmask'), + } + + def _route_entries(self, settings, target=None): + routes = list(settings.iter_routes()) + for interface in settings.iter_interfaces(): + subnets = interface.get("subnets", []) + for subnet in subnets: + if subnet.get('type') != 'static': + continue + gateway = subnet.get('gateway') + if gateway and len(gateway.split('.')) == 4: + routes.append({ + 'network': '0.0.0.0', + 'netmask': '0.0.0.0', + 'gateway': gateway}) + routes += subnet.get('routes', []) + for route in routes: + network = route.get('network') + if not network: + LOG.debug('Skipping a bad route entry') + continue + netmask = route.get('netmask') + gateway = route.get('gateway') + self.set_route(network, netmask, gateway) + + def _resolve_conf(self, settings, target=None): + nameservers = settings.dns_nameservers + searchdomains = settings.dns_searchdomains + for interface in settings.iter_interfaces(): + for subnet in interface.get("subnets", []): + if 'dns_nameservers' in subnet: + nameservers.extend(subnet['dns_nameservers']) + if 'dns_search' in subnet: + searchdomains.extend(subnet['dns_search']) + # Try to read the /etc/resolv.conf or just start from scratch if that + # fails. 
+ try: + resolvconf = ResolvConf(util.load_file(util.target_path( + target, self.resolv_conf_fn))) + resolvconf.parse() + except IOError: + util.logexc(LOG, "Failed to parse %s, use new empty file", + util.target_path(target, self.resolv_conf_fn)) + resolvconf = ResolvConf('') + resolvconf.parse() + + # Add some nameservers + for server in nameservers: + try: + resolvconf.add_nameserver(server) + except ValueError: + util.logexc(LOG, "Failed to add nameserver %s", server) + + # And add any searchdomains. + for domain in searchdomains: + try: + resolvconf.add_search_domain(domain) + except ValueError: + util.logexc(LOG, "Failed to add search domain %s", domain) + util.write_file( + util.target_path(target, self.resolv_conf_fn), + str(resolvconf), 0o644) + + def render_network_state(self, network_state, templates=None, target=None): + self._ifconfig_entries(settings=network_state) + self._route_entries(settings=network_state) + self._resolve_conf(settings=network_state) + + self.write_config() + self.start_services(run=self._postcmds) + + def dhcp_interfaces(self): + ic = self.interface_configurations.items + return [k for k, v in ic() if v == 'DHCP'] + + def start_services(self, run=False): + raise NotImplementedError() + + def write_config(self, target=None): + raise NotImplementedError() + + def set_gateway(self, gateway): + raise NotImplementedError() + + def rename_interface(self, cur_name, device_name): + raise NotImplementedError() + + def set_route(self, network, netmask, gateway): + raise NotImplementedError() diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py index d6f61da3..60f05bb2 100644 --- a/cloudinit/net/freebsd.py +++ b/cloudinit/net/freebsd.py @@ -1,156 +1,29 @@ # This file is part of cloud-init. See LICENSE file for license information. -import re - from cloudinit import log as logging -from cloudinit import net +import cloudinit.net.bsd from cloudinit import util -from cloudinit.distros import rhel_util -from cloudinit.distros.parsers.resolv_conf import ResolvConf - -from . 
import renderer LOG = logging.getLogger(__name__) -class Renderer(renderer.Renderer): - resolv_conf_fn = 'etc/resolv.conf' - rc_conf_fn = 'etc/rc.conf' +class Renderer(cloudinit.net.bsd.BSDRenderer): def __init__(self, config=None): - if not config: - config = {} - self.dhcp_interfaces = [] - self._postcmds = config.get('postcmds', True) - - def _update_rc_conf(self, settings, target=None): - fn = util.target_path(target, self.rc_conf_fn) - rhel_util.update_sysconfig_file(fn, settings) - - def _write_ifconfig_entries(self, settings, target=None): - ifname_by_mac = net.get_interfaces_by_mac() - for interface in settings.iter_interfaces(): - device_name = interface.get("name") - device_mac = interface.get("mac_address") - if device_name and re.match(r'^lo\d+$', device_name): - continue - if device_mac not in ifname_by_mac: - LOG.info('Cannot find any device with MAC %s', device_mac) - elif device_mac and device_name: - cur_name = ifname_by_mac[device_mac] - if cur_name != device_name: - LOG.info('netif service will rename interface %s to %s', - cur_name, device_name) - self._update_rc_conf( - {'ifconfig_%s_name' % cur_name: device_name}, - target=target) - else: - device_name = ifname_by_mac[device_mac] - - LOG.info('Configuring interface %s', device_name) - ifconfig = 'DHCP' # default - - for subnet in interface.get("subnets", []): - if ifconfig != 'DHCP': - LOG.info('The FreeBSD provider only set the first subnet.') - break - if subnet.get('type') == 'static': - if not subnet.get('netmask'): - LOG.debug( - 'Skipping IP %s, because there is no netmask', - subnet.get('address')) - continue - LOG.debug('Configuring dev %s with %s / %s', device_name, - subnet.get('address'), subnet.get('netmask')) - # Configure an ipv4 address. - ifconfig = ( - subnet.get('address') + ' netmask ' + - subnet.get('netmask')) - - if ifconfig == 'DHCP': - self.dhcp_interfaces.append(device_name) - self._update_rc_conf( - {'ifconfig_' + device_name: ifconfig}, - target=target) - - def _write_route_entries(self, settings, target=None): - routes = list(settings.iter_routes()) - for interface in settings.iter_interfaces(): - subnets = interface.get("subnets", []) - for subnet in subnets: - if subnet.get('type') != 'static': - continue - gateway = subnet.get('gateway') - if gateway and len(gateway.split('.')) == 4: - routes.append({ - 'network': '0.0.0.0', - 'netmask': '0.0.0.0', - 'gateway': gateway}) - routes += subnet.get('routes', []) - route_cpt = 0 - for route in routes: - network = route.get('network') - if not network: - LOG.debug('Skipping a bad route entry') - continue - netmask = route.get('netmask') - gateway = route.get('gateway') - route_cmd = "-route %s/%s %s" % (network, netmask, gateway) - if network == '0.0.0.0': - self._update_rc_conf( - {'defaultrouter': gateway}, target=target) + self._route_cpt = 0 + super(Renderer, self).__init__() + + def rename_interface(self, cur_name, device_name): + self.set_rc_config_value('ifconfig_%s_name' % cur_name, device_name) + + def write_config(self): + for device_name, v in self.interface_configurations.items(): + if isinstance(v, dict): + self.set_rc_config_value( + 'ifconfig_' + device_name, + v.get('address') + ' netmask ' + v.get('netmask')) else: - self._update_rc_conf( - {'route_net%d' % route_cpt: route_cmd}, target=target) - route_cpt += 1 - - def _write_resolve_conf(self, settings, target=None): - nameservers = settings.dns_nameservers - searchdomains = settings.dns_searchdomains - for interface in settings.iter_interfaces(): - for subnet in 
interface.get("subnets", []): - if 'dns_nameservers' in subnet: - nameservers.extend(subnet['dns_nameservers']) - if 'dns_search' in subnet: - searchdomains.extend(subnet['dns_search']) - # Try to read the /etc/resolv.conf or just start from scratch if that - # fails. - try: - resolvconf = ResolvConf(util.load_file(util.target_path( - target, self.resolv_conf_fn))) - resolvconf.parse() - except IOError: - util.logexc(LOG, "Failed to parse %s, use new empty file", - util.target_path(target, self.resolv_conf_fn)) - resolvconf = ResolvConf('') - resolvconf.parse() - - # Add some nameservers - for server in nameservers: - try: - resolvconf.add_nameserver(server) - except ValueError: - util.logexc(LOG, "Failed to add nameserver %s", server) - - # And add any searchdomains. - for domain in searchdomains: - try: - resolvconf.add_search_domain(domain) - except ValueError: - util.logexc(LOG, "Failed to add search domain %s", domain) - util.write_file( - util.target_path(target, self.resolv_conf_fn), - str(resolvconf), 0o644) - - def _write_network(self, settings, target=None): - self._write_ifconfig_entries(settings, target=target) - self._write_route_entries(settings, target=target) - self._write_resolve_conf(settings, target=target) - - self.start_services(run=self._postcmds) - - def render_network_state(self, network_state, templates=None, target=None): - self._write_network(network_state, target=target) + self.set_rc_config_value('ifconfig_' + device_name, 'DHCP') def start_services(self, run=False): if not run: @@ -165,11 +38,21 @@ class Renderer(renderer.Renderer): # - dhclient: it cannot stop the dhclient started by the netif service. # In both case, the situation is ok, and we can proceed. util.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1]) - for dhcp_interface in self.dhcp_interfaces: + + for dhcp_interface in self.dhcp_interfaces(): util.subp(['service', 'dhclient', 'restart', dhcp_interface], rcs=[0, 1], capture=True) + def set_route(self, network, netmask, gateway): + if network == '0.0.0.0': + self.set_rc_config_value('defaultrouter', gateway) + else: + route_name = 'route_net%d' % self._route_cpt + route_cmd = "-route %s/%s %s" % (network, netmask, gateway) + self.set_rc_config_value(route_name, route_cmd) + self._route_cpt += 1 + def available(target=None): return util.is_FreeBSD() diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py new file mode 100644 index 00000000..9cc8ef31 --- /dev/null +++ b/cloudinit/net/netbsd.py @@ -0,0 +1,42 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import log as logging +from cloudinit import util +import cloudinit.net.bsd + +LOG = logging.getLogger(__name__) + + +class Renderer(cloudinit.net.bsd.BSDRenderer): + + def __init__(self, config=None): + super(Renderer, self).__init__() + + def write_config(self): + if self.dhcp_interfaces(): + self.set_rc_config_value('dhcpcd', 'YES') + self.set_rc_config_value( + 'dhcpcd_flags', + ' '.join(self.dhcp_interfaces())) + for device_name, v in self.interface_configurations.items(): + if isinstance(v, dict): + self.set_rc_config_value( + 'ifconfig_' + device_name, + v.get('address') + ' netmask ' + v.get('netmask')) + + def start_services(self, run=False): + if not run: + LOG.debug("netbsd generate postcmd disabled") + return + + util.subp(['service', 'network', 'restart'], capture=True) + if self.dhcp_interfaces(): + util.subp(['service', 'dhcpcd', 'restart'], capture=True) + + def set_route(self, network, netmask, gateway): + if network == '0.0.0.0': + self.set_rc_config_value('defaultroute', gateway) + + +def available(target=None): + return util.is_NetBSD() diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py index b98dbbe3..e4bcae9d 100644 --- a/cloudinit/net/renderers.py +++ b/cloudinit/net/renderers.py @@ -2,6 +2,7 @@ from . import eni from . import freebsd +from . import netbsd from . import netplan from . import RendererNotFoundError from . import sysconfig @@ -9,11 +10,12 @@ from . import sysconfig NAME_TO_RENDERER = { "eni": eni, "freebsd": freebsd, + "netbsd": netbsd, "netplan": netplan, "sysconfig": sysconfig, } -DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd"] +DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd", "netbsd"] def search(priority=None, target=None, first=False): diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 6ba21f4d..1001f149 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -91,6 +91,53 @@ def _netdev_info_iproute(ipaddr_out): return devs +def _netdev_info_ifconfig_netbsd(ifconfig_data): + # fields that need to be returned in devs for each dev + devs = {} + for line in ifconfig_data.splitlines(): + if len(line) == 0: + continue + if line[0] not in ("\t", " "): + curdev = line.split()[0] + # current ifconfig pops a ':' on the end of the device + if curdev.endswith(':'): + curdev = curdev[:-1] + if curdev not in devs: + devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO) + toks = line.lower().strip().split() + if len(toks) > 1: + if re.search(r"flags=[x\d]+", toks[1]): + devs[curdev]['up'] = True + + for i in range(len(toks)): + if toks[i] == "inet": # Create new ipv4 addr entry + network, net_bits = toks[i + 1].split('/') + devs[curdev]['ipv4'].append( + {'ip': network, 'mask': net_prefix_to_ipv4_mask(net_bits)}) + elif toks[i] == "broadcast": + devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1] + elif toks[i] == "address:": + devs[curdev]['hwaddr'] = toks[i + 1] + elif toks[i] == "inet6": + if toks[i + 1] == "addr:": + devs[curdev]['ipv6'].append({'ip': toks[i + 2]}) + else: + devs[curdev]['ipv6'].append({'ip': toks[i + 1]}) + elif toks[i] == "prefixlen": # Add prefix to current ipv6 value + addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1] + devs[curdev]['ipv6'][-1]['ip'] = addr6 + elif toks[i].startswith("scope:"): + devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:") + elif toks[i] == "scopeid": + res = re.match(r'.*<(\S+)>', toks[i + 1]) + if res: + devs[curdev]['ipv6'][-1]['scope6'] = res.group(1) + else: + devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1] + + return 
devs + + def _netdev_info_ifconfig(ifconfig_data): # fields that need to be returned in devs for each dev devs = {} @@ -149,7 +196,10 @@ def _netdev_info_ifconfig(ifconfig_data): def netdev_info(empty=""): devs = {} - if util.which('ip'): + if util.is_NetBSD(): + (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1]) + devs = _netdev_info_ifconfig_netbsd(ifcfg_out) + elif util.which('ip'): # Try iproute first of all (ipaddr_out, _err) = util.subp(["ip", "addr", "show"]) devs = _netdev_info_iproute(ipaddr_out) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index f77923c2..dee8cde4 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -71,11 +71,11 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): if not found: dslist = self.sys_cfg.get('datasource_list') for dev in find_candidate_devs(dslist=dslist): - try: - if util.is_FreeBSD() and dev.startswith("/dev/cd"): + mtype = None + if (util.is_FreeBSD() or util.is_NetBSD()): + if dev.startswith("/dev/cd"): mtype = "cd9660" - else: - mtype = None + try: results = util.mount_cb(dev, read_config_drive, mtype=mtype) found = dev diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index ee748b41..2a44128d 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -40,6 +40,14 @@ class DataSourceNoCloud(sources.DataSource): devlist = [ p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label] if os.path.exists(p)] + elif util.is_NetBSD(): + out, _err = util.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0]) + devlist = [] + for dev in out.split(): + mscdlabel_out, _ = util.subp(['mscdlabel', dev], rcs=[0, 1]) + if ('label "%s"' % label) in mscdlabel_out: + devlist.append('/dev/' + dev) + devlist.append('/dev/' + dev + 'a') # NetBSD 7 else: # Query optical drive to get it in blkid cache for 2.6 kernels util.find_devs_with(path="/dev/sr0") diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 70f6bad7..8adb9e75 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -363,6 +363,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): root = self.tmp_dir() self.patchUtils(root) self.patchOS(root) + self.patchOpen(root) return root @contextmanager diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index 11f37000..815da0fd 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -419,12 +419,6 @@ class TestGetLinuxDistro(CiTestCase): if path == '/etc/redhat-release': return 1 - @classmethod - def freebsd_version_exists(self, path): - """Side effect function """ - if path == '/bin/freebsd-version': - return 1 - @mock.patch('cloudinit.util.load_file') def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): """Verify we get the correct name if the os-release file has @@ -443,11 +437,16 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('ubuntu', '16.04', 'xenial'), dist) - @mock.patch('cloudinit.util.subp') - def test_get_linux_freebsd(self, m_subp, m_path_exists): + @mock.patch('platform.system') + @mock.patch('platform.release') + @mock.patch('cloudinit.util._parse_redhat_release') + def test_get_linux_freebsd(self, m_path_exists, m_platform_release, + m_platform_system, m_parse_redhat_release): """Verify we get the correct name and release name on FreeBSD.""" - m_path_exists.side_effect = 
TestGetLinuxDistro.freebsd_version_exists - m_subp.return_value = ("12.0-RELEASE-p10\n", '') + m_path_exists.return_value = False + m_platform_release.return_value = '12.0-RELEASE-p10' + m_platform_system.return_value = 'FreeBSD' + m_parse_redhat_release.return_value = {} dist = util.get_linux_distro() self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist) @@ -538,27 +537,36 @@ class TestGetLinuxDistro(CiTestCase): self.assertEqual( ('opensuse-tumbleweed', '20180920', platform.machine()), dist) + @mock.patch('platform.system') @mock.patch('platform.dist', create=True) - def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists): + def test_get_linux_distro_no_data(self, m_platform_dist, + m_platform_system, m_path_exists): """Verify we get no information if os-release does not exist""" m_platform_dist.return_value = ('', '', '') + m_platform_system.return_value = "Linux" m_path_exists.return_value = 0 dist = util.get_linux_distro() self.assertEqual(('', '', ''), dist) + @mock.patch('platform.system') @mock.patch('platform.dist', create=True) - def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists): + def test_get_linux_distro_no_impl(self, m_platform_dist, + m_platform_system, m_path_exists): """Verify we get an empty tuple when no information exists and Exceptions are not propagated""" m_platform_dist.side_effect = Exception() + m_platform_system.return_value = "Linux" m_path_exists.return_value = 0 dist = util.get_linux_distro() self.assertEqual(('', '', ''), dist) + @mock.patch('platform.system') @mock.patch('platform.dist', create=True) - def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists): + def test_get_linux_distro_plat_data(self, m_platform_dist, + m_platform_system, m_path_exists): """Verify we get the correct platform information""" m_platform_dist.return_value = ('foo', '1.1', 'aarch64') + m_platform_system.return_value = "Linux" m_path_exists.return_value = 0 dist = util.get_linux_distro() self.assertEqual(('foo', '1.1', 'aarch64'), dist) diff --git a/cloudinit/util.py b/cloudinit/util.py index 132f6051..718c6959 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -552,6 +552,11 @@ def is_FreeBSD(): return system_info()['variant'] == "freebsd" +@lru_cache() +def is_NetBSD(): + return system_info()['variant'] == "netbsd" + + def get_cfg_option_bool(yobj, key, default=False): if key not in yobj: return default @@ -625,10 +630,9 @@ def get_linux_distro(): flavor = match.groupdict()['codename'] if distro_name == 'rhel': distro_name = 'redhat' - elif os.path.exists('/bin/freebsd-version'): - distro_name = 'freebsd' - distro_version, _ = subp(['uname', '-r']) - distro_version = distro_version.strip() + elif 'BSD' in platform.system(): + distro_name = platform.system().lower() + distro_version = platform.release() else: dist = ('', '', '') try: @@ -675,7 +679,7 @@ def system_info(): var = 'suse' else: var = 'linux' - elif system in ('windows', 'darwin', "freebsd"): + elif system in ('windows', 'darwin', "freebsd", "netbsd"): var = system info['variant'] = var @@ -1254,6 +1258,21 @@ def close_stdin(): os.dup2(fp.fileno(), sys.stdin.fileno()) +def find_devs_with_netbsd(criteria=None, oformat='device', + tag=None, no_cache=False, path=None): + if not path: + path = "/dev/cd0" + cmd = ["mscdlabel", path] + out, _ = subp(cmd, capture=True, decode="replace", rcs=[0, 1]) + result = out.split() + if result and len(result) > 2: + if criteria == "TYPE=iso9660" and "ISO" in result: + return [path] + if criteria == "LABEL=CONFIG-2" 
and '"config-2"' in result: + return [path] + return [] + + def find_devs_with(criteria=None, oformat='device', tag=None, no_cache=False, path=None): """ @@ -1263,6 +1282,10 @@ def find_devs_with(criteria=None, oformat='device', LABEL=