From 4f940bd1f76f50f947af533661ba6fafa3e60e59 Mon Sep 17 00:00:00 2001 From: "Mark T. Voelker" Date: Thu, 27 Feb 2020 14:08:14 -0500 Subject: Fix docs for OpenStack DMI Asset Tag (#228) In cloud-init 19.2, we added the ability for cloud-init to detect OpenStack platforms by checking for "OpenStack Compute" or "OpenStack Nova" in the chassis asset tag. However, this was never reflected in the documentation. This patch updates the datasources documentation for OpenStack to reflect the possibility of using the chassis asset tag. LP: #1669875 --- doc/rtd/topics/datasources/openstack.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'doc/rtd') diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst index 8ce2a53d..ff817e45 100644 --- a/doc/rtd/topics/datasources/openstack.rst +++ b/doc/rtd/topics/datasources/openstack.rst @@ -19,7 +19,8 @@ checks the following environment attributes as a potential OpenStack platform: * **/proc/1/environ**: Nova-lxd contains *product_name=OpenStack Nova* * **DMI product_name**: Either *Openstack Nova* or *OpenStack Compute* - * **DMI chassis_asset_tag** is *OpenTelekomCloud* + * **DMI chassis_asset_tag** is *OpenTelekomCloud* or *OpenStack Nova* + (since 19.2) or *OpenStack Compute* (since 19.2) Configuration -- cgit v1.2.3 From 0140f74dd577c6407197eb82ca47ad5f07cac4f4 Mon Sep 17 00:00:00 2001 From: Nick Wales <588472+nickwales@users.noreply.github.com> Date: Thu, 27 Feb 2020 13:34:24 -0600 Subject: Fixes typo on Amazon Web Services (#217) one line doc fix --- doc/rtd/topics/instancedata.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'doc/rtd') diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index e7dd0d62..4227c4fd 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -103,7 +103,7 @@ v1.cloud_name ------------- Where possible this will indicate the 'name' of the cloud the system is running on. This is different than the 'platform' item. For example, the cloud name of -Amazone Web Services is 'aws', while the platform is 'ec2'. +Amazon Web Services is 'aws', while the platform is 'ec2'. If determining a specific name is not possible or provided in meta-data, then this filed may contain the same content as 'platform'. -- cgit v1.2.3 From 67c8e53cc3fe007bb40d6e9c10549ca8200a9cd7 Mon Sep 17 00:00:00 2001 From: Alexey Vazhnov Date: Fri, 28 Feb 2020 01:16:29 +0300 Subject: docs: typo fixed: dta → data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/rtd/topics/format.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'doc/rtd') diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst index 2b60bdd3..e3e5f8aa 100644 --- a/doc/rtd/topics/format.rst +++ b/doc/rtd/topics/format.rst @@ -126,7 +126,7 @@ Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using a MIME archive. .. note:: - New in cloud-init v. 18.4: Cloud config dta can also render cloud instance + New in cloud-init v. 18.4: Cloud config data can also render cloud instance metadata variables using jinja templating. See :ref:`instance_metadata` for more information. 
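As a minimal sketch of what the jinja templating mentioned in the note above looks like in practice, the snippet below renders a cloud-config template against the standardized keys in /run/cloud-init/instance-data.json using the jinja2 library directly. This is only an illustration, not cloud-init's exact rendering path for `## template: jinja` user-data; the file path and the `v1.cloud_name` / `v1.region` keys come from the instance-data documentation patched later in this series.

```
# Illustration only: render a jinja cloud-config template against the
# instance-data.json that cloud-init writes at boot. Requires the jinja2
# package and assumes the file exists (i.e. run on a booted instance).
import json
from jinja2 import Template

with open("/run/cloud-init/instance-data.json") as f:
    instance_data = json.load(f)

user_data = """\
#cloud-config
runcmd:
  - echo "running on {{ v1.cloud_name }} in {{ v1.region }}"
"""

# Top-level keys (v1, ds, sys_info, ...) become template variables.
print(Template(user_data).render(**instance_data))
```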
-- cgit v1.2.3 From 1d2dfc5d879dc905f440697c2b805c9485dda821 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 4 Mar 2020 13:53:19 -0700 Subject: net: support network-config:disabled on the kernel commandline (#232) Allow disabling cloud-init's network configuration via a plain-text kernel cmdline Cloud-init docs indicate that users can disable cloud-init networking via kernel command line parameter 'network-config='. This does not work unless the payload base64 encoded. Document the base64 encoding requirement and add a plain-text value for disabling cloud-init network config: network-config=disabled Also: - Log an error and ignore any plain-text network-config payloads that are not specifically 'network-config=disabled'. - Log a warning if network-config kernel param is invalid yaml but do not raise an exception, allowing boot to continue and use fallback networking. LP: #1862702 --- cloudinit/net/cmdline.py | 34 ++++++++++++++++++++-------------- doc/rtd/topics/network-config.rst | 22 ++++++++++++++-------- tests/unittests/test_net.py | 17 +++++++++++++++++ 3 files changed, 51 insertions(+), 22 deletions(-) (limited to 'doc/rtd') diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 64e1c699..814ce411 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -10,6 +10,7 @@ import base64 import glob import gzip import io +import logging import os from cloudinit import util @@ -19,6 +20,8 @@ from . import read_sys_net_safe _OPEN_ISCSI_INTERFACE_FILE = "/run/initramfs/open-iscsi.interface" +KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED = "disabled" + class InitramfsNetworkConfigSource(metaclass=abc.ABCMeta): """ABC for net config sources that read config written by initramfses""" @@ -233,34 +236,35 @@ def read_initramfs_config(): return None -def _decomp_gzip(blob, strict=True): - # decompress blob. raise exception if not compressed unless strict=False. +def _decomp_gzip(blob): + # decompress blob or return original blob with io.BytesIO(blob) as iobuf: gzfp = None try: gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf) return gzfp.read() except IOError: - if strict: - raise return blob finally: if gzfp: gzfp.close() -def _b64dgz(b64str, gzipped="try"): - # decode a base64 string. If gzipped is true, transparently uncompresss - # if gzipped is 'try', then try gunzip, returning the original on fail. - try: - blob = base64.b64decode(b64str) - except TypeError: - raise ValueError("Invalid base64 text: %s" % b64str) +def _b64dgz(data): + """Decode a string base64 encoding, if gzipped, uncompress as well - if not gzipped: - return blob + :return: decompressed unencoded string of the data or empty string on + unencoded data. + """ + try: + blob = base64.b64decode(data) + except (TypeError, ValueError): + logging.error( + "Expected base64 encoded kernel commandline parameter" + " network-config. 
Ignoring network-config=%s.", data) + return '' - return _decomp_gzip(blob, strict=gzipped != "try") + return _decomp_gzip(blob) def read_kernel_cmdline_config(cmdline=None): @@ -273,6 +277,8 @@ def read_kernel_cmdline_config(cmdline=None): if tok.startswith("network-config="): data64 = tok.split("=", 1)[1] if data64: + if data64 == KERNEL_CMDLINE_NETWORK_CONFIG_DISABLED: + return {"config": "disabled"} return util.load_yaml(_b64dgz(data64)) return None diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 1520ba9a..0144dfae 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -25,17 +25,23 @@ For example, OpenStack may provide network config in the MetaData Service. **System Config** -A ``network:`` entry in /etc/cloud/cloud.cfg.d/* configuration files. +A ``network:`` entry in ``/etc/cloud/cloud.cfg.d/*`` configuration files. **Kernel Command Line** -``ip=`` or ``network-config=`` +``ip=`` or ``network-config=`` User-data cannot change an instance's network configuration. In the absence of network configuration in any of the above sources , `Cloud-init`_ will write out a network configuration that will issue a DHCP request on a "first" network interface. +.. note:: + + The network-config value is expected to be a Base64 encoded YAML string in + :ref:`network_config_v1` or :ref:`network_config_v2` format. Optionally it + can be compressed with ``gzip`` prior to Base64 encoding. + Disabling Network Configuration =============================== @@ -48,19 +54,19 @@ on other methods, such as embedded configuration or other customizations. **Kernel Command Line** -`Cloud-init`_ will check for a parameter ``network-config`` and the -value is expected to be YAML string in the :ref:`network_config_v1` format. -The YAML string may optionally be ``Base64`` encoded, and optionally -compressed with ``gzip``. +`Cloud-init`_ will check additionally check for the parameter +``network-config=disabled`` which will automatically disable any network +configuration. Example disabling kernel command line entry: :: - network-config={config: disabled} + network-config=disabled **cloud config** -In the combined cloud-init configuration dictionary. 
:: +In the combined cloud-init configuration dictionary, merged from +``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``:: network: config: disabled diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index bedd05fe..e03857c4 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -4017,6 +4017,8 @@ class TestEniNetworkStateToEni(CiTestCase): class TestCmdlineConfigParsing(CiTestCase): + with_logs = True + simple_cfg = { 'config': [{"type": "physical", "name": "eth0", "mac_address": "c0:d6:9f:2c:e8:80", @@ -4066,6 +4068,21 @@ class TestCmdlineConfigParsing(CiTestCase): found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) self.assertEqual(found, self.simple_cfg) + def test_cmdline_with_net_config_disabled(self): + raw_cmdline = 'ro network-config=disabled root=foo' + found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) + self.assertEqual(found, {'config': 'disabled'}) + + def test_cmdline_with_net_config_unencoded_logs_error(self): + """network-config cannot be unencoded besides 'disabled'.""" + raw_cmdline = 'ro network-config={config:disabled} root=foo' + found = cmdline.read_kernel_cmdline_config(cmdline=raw_cmdline) + self.assertIsNone(found) + expected_log = ( + 'ERROR: Expected base64 encoded kernel commandline parameter' + ' network-config. Ignoring network-config={config:disabled}.') + self.assertIn(expected_log, self.logs.getvalue()) + def test_cmdline_with_b64_gz(self): data = _gzip_data(json.dumps(self.simple_cfg).encode()) encoded_text = base64.b64encode(data).decode() -- cgit v1.2.3 From 71af48df3514ca831c90b77dc71ba0a121dec401 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 10 Mar 2020 08:22:22 -0600 Subject: instance-data: add cloud-init merged_cfg and sys_info keys to json (#214) Cloud-config userdata provided as jinja templates are now distro, platform and merged cloud config aware. The cloud-init query command will also surface this config data. Now users can selectively render portions of cloud-config based on: * distro name, version, release * python version * merged cloud config values * machine platform * kernel To support template handling of this config, add new top-level keys to /run/cloud-init/instance-data.json. The new 'merged_cfg' key represents merged cloud config from /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/*. The new 'sys_info' key which captures distro and platform info from cloudinit.util.system_info. Cloud config userdata templates can render conditional content based on these additional environmental checks such as the following simple example: ``` ## template: jinja #cloud-config runcmd: {% if distro == 'opensuse' %} - sh /custom-setup-sles {% elif distro == 'centos' %} - sh /custom-setup-centos {% elif distro == 'debian' %} - sh /custom-setup-debian {% endif %} ``` To see all values: sudo cloud-init query --all Any keys added to the standardized v1 keys are guaranteed to not change or drop on future released of cloud-init. 
'v1' keys will be retained for backward-compatibility even if a new standardized 'v2' set of keys are introduced The following standardized v1 keys are added: * distro, distro_release, distro_version, kernel_version, machine, python_version, system_platform, variant LP: #1865969 --- cloudinit/sources/__init__.py | 43 ++- cloudinit/sources/tests/test_init.py | 98 +++++- cloudinit/util.py | 2 +- doc/rtd/topics/instancedata.rst | 360 +++++++++++++++------ tests/cloud_tests/testcases/base.py | 55 +++- tests/unittests/test_datasource/test_cloudsigma.py | 6 +- 6 files changed, 426 insertions(+), 138 deletions(-) (limited to 'doc/rtd') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 805d803d..a6e6d202 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -89,26 +89,26 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()): @return Dict copy of processed metadata. """ md_copy = copy.deepcopy(metadata) - md_copy['base64_encoded_keys'] = [] - md_copy['sensitive_keys'] = [] + base64_encoded_keys = [] + sens_keys = [] for key, val in metadata.items(): if key_path: sub_key_path = key_path + '/' + key else: sub_key_path = key if key in sensitive_keys or sub_key_path in sensitive_keys: - md_copy['sensitive_keys'].append(sub_key_path) + sens_keys.append(sub_key_path) if isinstance(val, str) and val.startswith('ci-b64:'): - md_copy['base64_encoded_keys'].append(sub_key_path) + base64_encoded_keys.append(sub_key_path) md_copy[key] = val.replace('ci-b64:', '') if isinstance(val, dict): return_val = process_instance_metadata( val, sub_key_path, sensitive_keys) - md_copy['base64_encoded_keys'].extend( - return_val.pop('base64_encoded_keys')) - md_copy['sensitive_keys'].extend( - return_val.pop('sensitive_keys')) + base64_encoded_keys.extend(return_val.pop('base64_encoded_keys')) + sens_keys.extend(return_val.pop('sensitive_keys')) md_copy[key] = return_val + md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys) + md_copy['sensitive_keys'] = sorted(sens_keys) return md_copy @@ -193,7 +193,7 @@ class DataSource(metaclass=abc.ABCMeta): # N-tuple of keypaths or keynames redact from instance-data.json for # non-root users - sensitive_metadata_keys = ('security-credentials',) + sensitive_metadata_keys = ('merged_cfg', 'security-credentials',) def __init__(self, sys_cfg, distro, paths, ud_proc=None): self.sys_cfg = sys_cfg @@ -218,14 +218,15 @@ class DataSource(metaclass=abc.ABCMeta): def __str__(self): return type_utils.obj_name(self) - def _get_standardized_metadata(self): + def _get_standardized_metadata(self, instance_data): """Return a dictionary of standardized metadata keys.""" local_hostname = self.get_hostname() instance_id = self.get_instance_id() availability_zone = self.availability_zone # In the event of upgrade from existing cloudinit, pickled datasource # will not contain these new class attributes. So we need to recrawl - # metadata to discover that content. 
+ # metadata to discover that content + sysinfo = instance_data["sys_info"] return { 'v1': { '_beta_keys': ['subplatform'], @@ -233,14 +234,22 @@ class DataSource(metaclass=abc.ABCMeta): 'availability_zone': availability_zone, 'cloud-name': self.cloud_name, 'cloud_name': self.cloud_name, + 'distro': sysinfo["dist"][0], + 'distro_version': sysinfo["dist"][1], + 'distro_release': sysinfo["dist"][2], 'platform': self.platform_type, 'public_ssh_keys': self.get_public_ssh_keys(), + 'python_version': sysinfo["python"], 'instance-id': instance_id, 'instance_id': instance_id, + 'kernel_release': sysinfo["uname"][2], 'local-hostname': local_hostname, 'local_hostname': local_hostname, + 'machine': sysinfo["uname"][4], 'region': self.region, - 'subplatform': self.subplatform}} + 'subplatform': self.subplatform, + 'system_platform': sysinfo["platform"], + 'variant': sysinfo["variant"]}} def clear_cached_attrs(self, attr_defaults=()): """Reset any cached metadata attributes to datasource defaults. @@ -299,9 +308,15 @@ class DataSource(metaclass=abc.ABCMeta): ec2_metadata = getattr(self, 'ec2_metadata') if ec2_metadata != UNSET: instance_data['ds']['ec2_metadata'] = ec2_metadata - instance_data.update( - self._get_standardized_metadata()) instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT + # Add merged cloud.cfg and sys info for jinja templates and cli query + instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg) + instance_data['merged_cfg']['_doc'] = ( + 'Merged cloud-init system config from /etc/cloud/cloud.cfg and' + ' /etc/cloud/cloud.cfg.d/') + instance_data['sys_info'] = util.system_info() + instance_data.update( + self._get_standardized_metadata(instance_data)) try: # Process content base64encoding unserializable values content = util.json_dumps(instance_data) diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 6db127e7..541cbbeb 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -55,6 +55,7 @@ class InvalidDataSourceTestSubclassNet(DataSource): class TestDataSource(CiTestCase): with_logs = True + maxDiff = None def setUp(self): super(TestDataSource, self).setUp() @@ -288,27 +289,47 @@ class TestDataSource(CiTestCase): tmp = self.tmp_dir() datasource = DataSourceTestSubclassNet( self.sys_cfg, self.distro, Paths({'run_dir': tmp})) - datasource.get_data() + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) content = util.load_file(json_file) expected = { 'base64_encoded_keys': [], - 'sensitive_keys': [], + 'merged_cfg': REDACT_SENSITIVE_VALUE, + 'sensitive_keys': ['merged_cfg'], + 'sys_info': sys_info, 'v1': { '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', 'instance-id': 'iid-datasource', 'instance_id': 'iid-datasource', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', + 'kernel_release': '5.4.0-24-generic', + 'machine': 'x86_64', 'platform': 'mytestsubclass', 'public_ssh_keys': [], + 'python_version': '3.7', 'region': 'myregion', - 'subplatform': 
'unknown'}, + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'subplatform': 'unknown', + 'variant': 'ubuntu'}, 'ds': { + '_doc': EXPERIMENTAL_TEXT, 'meta_data': {'availability_zone': 'myaz', 'local-hostname': 'test-subclass-hostname', @@ -329,28 +350,49 @@ class TestDataSource(CiTestCase): 'region': 'myregion', 'some': {'security-credentials': { 'cred1': 'sekret', 'cred2': 'othersekret'}}}) - self.assertEqual( - ('security-credentials',), datasource.sensitive_metadata_keys) - datasource.get_data() + self.assertItemsEqual( + ('merged_cfg', 'security-credentials',), + datasource.sensitive_metadata_keys) + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp) redacted = util.load_json(util.load_file(json_file)) expected = { 'base64_encoded_keys': [], - 'sensitive_keys': ['ds/meta_data/some/security-credentials'], + 'merged_cfg': REDACT_SENSITIVE_VALUE, + 'sensitive_keys': [ + 'ds/meta_data/some/security-credentials', 'merged_cfg'], + 'sys_info': sys_info, 'v1': { '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', 'instance-id': 'iid-datasource', 'instance_id': 'iid-datasource', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', + 'kernel_release': '5.4.0-24-generic', + 'machine': 'x86_64', 'platform': 'mytestsubclass', 'public_ssh_keys': [], + 'python_version': '3.7', 'region': 'myregion', - 'subplatform': 'unknown'}, + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'subplatform': 'unknown', + 'variant': 'ubuntu'}, 'ds': { '_doc': EXPERIMENTAL_TEXT, 'meta_data': { @@ -359,7 +401,7 @@ class TestDataSource(CiTestCase): 'region': 'myregion', 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}} } - self.assertEqual(expected, redacted) + self.assertItemsEqual(expected, redacted) file_stat = os.stat(json_file) self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) @@ -376,28 +418,54 @@ class TestDataSource(CiTestCase): 'region': 'myregion', 'some': {'security-credentials': { 'cred1': 'sekret', 'cred2': 'othersekret'}}}) - self.assertEqual( - ('security-credentials',), datasource.sensitive_metadata_keys) - datasource.get_data() + sys_info = { + "python": "3.7", + "platform": + "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal", + "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah", + "x86_64"], + "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]} + + self.assertItemsEqual( + ('merged_cfg', 'security-credentials',), + datasource.sensitive_metadata_keys) + with mock.patch("cloudinit.util.system_info", return_value=sys_info): + datasource.get_data() sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp) content = util.load_file(sensitive_json_file) expected = { 'base64_encoded_keys': [], - 'sensitive_keys': ['ds/meta_data/some/security-credentials'], + 'merged_cfg': { + '_doc': ( + 'Merged cloud-init system config from ' + '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'), + 'datasource': {'_undef': {'key1': False}}}, + 'sensitive_keys': [ + 
'ds/meta_data/some/security-credentials', 'merged_cfg'], + 'sys_info': sys_info, 'v1': { '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', 'cloud_name': 'subclasscloudname', + 'distro': 'ubuntu', + 'distro_release': 'focal', + 'distro_version': '20.04', 'instance-id': 'iid-datasource', 'instance_id': 'iid-datasource', + 'kernel_release': '5.4.0-24-generic', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', + 'machine': 'x86_64', 'platform': 'mytestsubclass', 'public_ssh_keys': [], + 'python_version': '3.7', 'region': 'myregion', - 'subplatform': 'unknown'}, + 'subplatform': 'unknown', + 'system_platform': + 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal', + 'variant': 'ubuntu'}, 'ds': { '_doc': EXPERIMENTAL_TEXT, 'meta_data': { @@ -408,7 +476,7 @@ class TestDataSource(CiTestCase): 'security-credentials': {'cred1': 'sekret', 'cred2': 'othersekret'}}}} } - self.assertEqual(expected, util.load_json(content)) + self.assertItemsEqual(expected, util.load_json(content)) file_stat = os.stat(sensitive_json_file) self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode)) self.assertEqual(expected, util.load_json(content)) diff --git a/cloudinit/util.py b/cloudinit/util.py index c02b3d9a..132f6051 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -656,7 +656,7 @@ def system_info(): 'system': platform.system(), 'release': platform.release(), 'python': platform.python_version(), - 'uname': platform.uname(), + 'uname': list(platform.uname()), 'dist': get_linux_distro() } system = info['system'].lower() diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 4227c4fd..845098bb 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -76,6 +76,11 @@ There are three basic top-level keys: 'security sensitive'. Only the keys listed here will be redacted from instance-data.json for non-root users. +* **merged_cfg**: Merged cloud-init 'system_config' from `/etc/cloud/cloud.cfg` + and `/etc/cloud/cloud-cfg.d`. Values under this key could contain sensitive + information such as passwords, so it is included in the **sensitive-keys** + list which is only readable by root. + * **ds**: Datasource-specific metadata crawled for the specific cloud platform. It should closely represent the structure of the cloud metadata crawled. The structure of content and details provided are entirely @@ -83,6 +88,9 @@ There are three basic top-level keys: The content exposed under the 'ds' key is currently **experimental** and expected to change slightly in the upcoming cloud-init release. +* **sys_info**: Information about the underlying os, python, architecture and + kernel. This represents the data collected by `cloudinit.util.system_info`. + * **v1**: Standardized cloud-init metadata keys, these keys are guaranteed to exist on all cloud platforms. They will also retain their current behavior and format and will be carried forward even if cloud-init introduces a new @@ -117,6 +125,21 @@ Example output: - nocloud - ovf +v1.distro, v1.distro_version, v1.distro_release +----------------------------------------------- +This shall be the distro name, version and release as determined by +`cloudinit.util.get_linux_distro`. 
+ +Example output: + +- centos, 7.5, core +- debian, 9, stretch +- freebsd, 12.0-release-p10, +- opensuse, 42.3, x86_64 +- opensuse-tumbleweed, 20180920, x86_64 +- redhat, 7.5, 'maipo' +- sles, 12.3, x86_64 +- ubuntu, 20.04, focal v1.instance_id -------------- @@ -126,6 +149,14 @@ Examples output: - i- +v1.kernel_release +----------------- +This shall be the running kernel `uname -r` + +Example output: + +- 5.3.0-1010-aws + v1.local_hostname ----------------- The internal or local hostname of the system. @@ -135,6 +166,17 @@ Examples output: - ip-10-41-41-70 - +v1.machine +---------- +This shall be the running cpu machine architecture `uname -m` + +Example output: + +- x86_64 +- i686 +- ppc64le +- s390x + v1.platform ------------- An attempt to identify the cloud platfrom instance that the system is running @@ -154,7 +196,7 @@ v1.subplatform Additional platform details describing the specific source or type of metadata used. The format of subplatform will be: -`` (`` +`` ()`` Examples output: @@ -171,6 +213,15 @@ Examples output: - ['ssh-rsa AA...', ...] +v1.python_version +----------------- +The version of python that is running cloud-init as determined by +`cloudinit.util.system_info` + +Example output: + +- 3.7.6 + v1.region --------- The physical region/data center in which the instance is deployed. @@ -192,164 +243,265 @@ Examples output: Example Output -------------- -Below is an example of ``/run/cloud-init/instance_data.json`` on an EC2 -instance: +Below is an example of ``/run/cloud-init/instance-data-sensitive.json`` on an +EC2 instance: .. sourcecode:: json { + "_beta_keys": [ + "subplatform" + ], + "availability_zone": "us-east-1b", "base64_encoded_keys": [], + "merged_cfg": { + "_doc": "Merged cloud-init system config from /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/", + "_log": [ + "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n", + "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n", + "[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=(\"/dev/log\", handlers.SysLogHandler.LOG_USER)\n" + ], + "cloud_config_modules": [ + "emit_upstart", + "snap", + "ssh-import-id", + "locale", + "set-passwords", + "grub-dpkg", + "apt-pipelining", + "apt-configure", + "ubuntu-advantage", + "ntp", + "timezone", + "disable-ec2-metadata", + "runcmd", + "byobu" + ], + "cloud_final_modules": [ + "package-update-upgrade-install", + "fan", + "landscape", + "lxd", + "ubuntu-drivers", + "puppet", + "chef", + "mcollective", + "salt-minion", + "rightscale_userdata", + "scripts-vendor", + "scripts-per-once", + "scripts-per-boot", + "scripts-per-instance", + "scripts-user", + "ssh-authkey-fingerprints", + "keys-to-console", + "phone-home", + "final-message", + "power-state-change" + ], + "cloud_init_modules": [ + "migrator", + "seed_random", + "bootcmd", + "write-files", + "growpart", + "resizefs", + "disk_setup", + "mounts", + "set_hostname", + 
"update_hostname", + "update_etc_hosts", + "ca-certs", + "rsyslog", + "users-groups", + "ssh" + ], + "datasource_list": [ + "Ec2", + "None" + ], + "def_log_file": "/var/log/cloud-init.log", + "disable_root": true, + "log_cfgs": [ + [ + "[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n", + "[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n" + ] + ], + "output": { + "all": "| tee -a /var/log/cloud-init-output.log" + }, + "preserve_hostname": false, + "syslog_fix_perms": [ + "syslog:adm", + "root:adm", + "root:wheel", + "root:root" + ], + "users": [ + "default" + ], + "vendor_data": { + "enabled": true, + "prefix": [] + } + }, + "cloud_name": "aws", + "distro": "ubuntu", + "distro_release": "focal", + "distro_version": "20.04", "ds": { "_doc": "EXPERIMENTAL: The structure and format of content scoped under the 'ds' key may change in subsequent releases of cloud-init.", "_metadata_api_version": "2016-09-02", "dynamic": { - "instance-identity": { + "instance_identity": { "document": { - "accountId": "437526006925", + "accountId": "329910648901", "architecture": "x86_64", - "availabilityZone": "us-east-2b", + "availabilityZone": "us-east-1b", "billingProducts": null, "devpayProductCodes": null, - "imageId": "ami-079638aae7046bdd2", - "instanceId": "i-075f088c72ad3271c", + "imageId": "ami-02e8aa396f8be3b6d", + "instanceId": "i-0929128ff2f73a2f1", "instanceType": "t2.micro", "kernelId": null, "marketplaceProductCodes": null, - "pendingTime": "2018-10-05T20:10:43Z", - "privateIp": "10.41.41.95", + "pendingTime": "2020-02-27T20:46:18Z", + "privateIp": "172.31.81.43", "ramdiskId": null, - "region": "us-east-2", + "region": "us-east-1", "version": "2017-09-30" }, "pkcs7": [ - "MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCAJIAEggHbewog", - "ICJkZXZwYXlQcm9kdWN0Q29kZXMiIDogbnVsbCwKICAibWFya2V0cGxhY2VQcm9kdWN0Q29kZXMi", - "IDogbnVsbCwKICAicHJpdmF0ZUlwIiA6ICIxMC40MS40MS45NSIsCiAgInZlcnNpb24iIDogIjIw", - "MTctMDktMzAiLAogICJpbnN0YW5jZUlkIiA6ICJpLTA3NWYwODhjNzJhZDMyNzFjIiwKICAiYmls", - "bGluZ1Byb2R1Y3RzIiA6IG51bGwsCiAgImluc3RhbmNlVHlwZSIgOiAidDIubWljcm8iLAogICJh", - "Y2NvdW50SWQiIDogIjQzNzUyNjAwNjkyNSIsCiAgImF2YWlsYWJpbGl0eVpvbmUiIDogInVzLWVh", - "c3QtMmIiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJyYW1kaXNrSWQiIDogbnVsbCwKICAiYXJj", - "aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJpbWFnZUlkIiA6ICJhbWktMDc5NjM4YWFlNzA0NmJk", - "ZDIiLAogICJwZW5kaW5nVGltZSIgOiAiMjAxOC0xMC0wNVQyMDoxMDo0M1oiLAogICJyZWdpb24i", - "IDogInVzLWVhc3QtMiIKfQAAAAAAADGCARcwggETAgEBMGkwXDELMAkGA1UEBhMCVVMxGTAXBgNV", - "BAgTEFdhc2hpbmd0b24gU3RhdGUxEDAOBgNVBAcTB1NlYXR0bGUxIDAeBgNVBAoTF0FtYXpvbiBX", - "ZWIgU2VydmljZXMgTExDAgkAlrpI2eVeGmcwCQYFKw4DAhoFAKBdMBgGCSqGSIb3DQEJAzELBgkq", - "hkiG9w0BBwEwHAYJKoZIhvcNAQkFMQ8XDTE4MTAwNTIwMTA0OFowIwYJKoZIhvcNAQkEMRYEFK0k", - "Tz6n1A8/zU1AzFj0riNQORw2MAkGByqGSM44BAMELjAsAhRNrr174y98grPBVXUforN/6wZp8AIU", - "JLZBkrB2GJA8A4WJ1okq++jSrBIAAAAAAAA=" + "MIAGCSqGSIb3DQ...", + "REDACTED", + 
"AhQUgq0iPWqPTVnT96tZE6L1XjjLHQAAAAAAAA==" ], "rsa2048": [ - "MIAGCSqGSIb3DQEHAqCAMIACAQExDzANBglghkgBZQMEAgEFADCABgkqhkiG9w0BBwGggCSABIIB", - "23sKICAiZGV2cGF5UHJvZHVjdENvZGVzIiA6IG51bGwsCiAgIm1hcmtldHBsYWNlUHJvZHVjdENv", - "ZGVzIiA6IG51bGwsCiAgInByaXZhdGVJcCIgOiAiMTAuNDEuNDEuOTUiLAogICJ2ZXJzaW9uIiA6", - "ICIyMDE3LTA5LTMwIiwKICAiaW5zdGFuY2VJZCIgOiAiaS0wNzVmMDg4YzcyYWQzMjcxYyIsCiAg", - "ImJpbGxpbmdQcm9kdWN0cyIgOiBudWxsLAogICJpbnN0YW5jZVR5cGUiIDogInQyLm1pY3JvIiwK", - "ICAiYWNjb3VudElkIiA6ICI0Mzc1MjYwMDY5MjUiLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1", - "cy1lYXN0LTJiIiwKICAia2VybmVsSWQiIDogbnVsbCwKICAicmFtZGlza0lkIiA6IG51bGwsCiAg", - "ImFyY2hpdGVjdHVyZSIgOiAieDg2XzY0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLTA3OTYzOGFhZTcw", - "NDZiZGQyIiwKICAicGVuZGluZ1RpbWUiIDogIjIwMTgtMTAtMDVUMjA6MTA6NDNaIiwKICAicmVn", - "aW9uIiA6ICJ1cy1lYXN0LTIiCn0AAAAAAAAxggH/MIIB+wIBATBpMFwxCzAJBgNVBAYTAlVTMRkw", - "FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6", - "b24gV2ViIFNlcnZpY2VzIExMQwIJAM07oeX4xevdMA0GCWCGSAFlAwQCAQUAoGkwGAYJKoZIhvcN", - "AQkDMQsGCSqGSIb3DQEHATAcBgkqhkiG9w0BCQUxDxcNMTgxMDA1MjAxMDQ4WjAvBgkqhkiG9w0B", - "CQQxIgQgkYz0pZk3zJKBi4KP4egeOKJl/UYwu5UdE7id74pmPwMwDQYJKoZIhvcNAQEBBQAEggEA", - "dC3uIGGNul1OC1mJKSH3XoBWsYH20J/xhIdftYBoXHGf2BSFsrs9ZscXd2rKAKea4pSPOZEYMXgz", - "lPuT7W0WU89N3ZKviy/ReMSRjmI/jJmsY1lea6mlgcsJXreBXFMYucZvyeWGHdnCjamoKWXkmZlM", - "mSB1gshWy8Y7DzoKviYPQZi5aI54XK2Upt4kGme1tH1NI2Cq+hM4K+adxTbNhS3uzvWaWzMklUuU", - "QHX2GMmjAVRVc8vnA8IAsBCJJp+gFgYzi09IK+cwNgCFFPADoG6jbMHHf4sLB3MUGpiA+G9JlCnM", - "fmkjI2pNRB8spc0k4UG4egqLrqCz67WuK38tjwAAAAAAAA==" + "MIAGCSqGSIb...", + "REDACTED", + "clYQvuE45xXm7Yreg3QtQbrP//owl1eZHj6s350AAAAAAAA=" ], "signature": [ - "Tsw6h+V3WnxrNVSXBYIOs1V4j95YR1mLPPH45XnhX0/Ei3waJqf7/7EEKGYP1Cr4PTYEULtZ7Mvf", - "+xJpM50Ivs2bdF7o0c4vnplRWe3f06NI9pv50dr110j/wNzP4MZ1pLhJCqubQOaaBTF3LFutgRrt", - "r4B0mN3p7EcqD8G+ll0=" + "dA+QV+LLCWCRNddnrKleYmh2GvYo+t8urDkdgmDSsPi", + "REDACTED", + "kDT4ygyJLFkd3b4qjAs=" ] } }, - "meta-data": { - "ami-id": "ami-079638aae7046bdd2", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": { + "meta_data": { + "ami_id": "ami-02e8aa396f8be3b6d", + "ami_launch_index": "0", + "ami_manifest_path": "(unknown)", + "block_device_mapping": { "ami": "/dev/sda1", - "ephemeral0": "sdb", - "ephemeral1": "sdc", "root": "/dev/sda1" }, - "hostname": "ip-10-41-41-95.us-east-2.compute.internal", - "instance-action": "none", - "instance-id": "i-075f088c72ad3271c", - "instance-type": "t2.micro", - "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal", - "local-ipv4": "10.41.41.95", - "mac": "06:74:8f:39:cd:a6", + "hostname": "ip-172-31-81-43.ec2.internal", + "instance_action": "none", + "instance_id": "i-0929128ff2f73a2f1", + "instance_type": "t2.micro", + "local_hostname": "ip-172-31-81-43.ec2.internal", + "local_ipv4": "172.31.81.43", + "mac": "12:7e:c9:93:29:af", "metrics": { "vhostmd": "" }, "network": { "interfaces": { "macs": { - "06:74:8f:39:cd:a6": { - "device-number": "0", - "interface-id": "eni-052058bbd7831eaae", - "ipv4-associations": { - "18.218.221.122": "10.41.41.95" - }, - "local-hostname": "ip-10-41-41-95.us-east-2.compute.internal", - "local-ipv4s": "10.41.41.95", - "mac": "06:74:8f:39:cd:a6", - "owner-id": "437526006925", - "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com", - "public-ipv4s": "18.218.221.122", - "security-group-ids": "sg-828247e9", - "security-groups": "Cloud-init integration test secgroup", - "subnet-id": "subnet-282f3053", - 
"subnet-ipv4-cidr-block": "10.41.41.0/24", - "subnet-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/64", - "vpc-id": "vpc-252ef24d", - "vpc-ipv4-cidr-block": "10.41.0.0/16", - "vpc-ipv4-cidr-blocks": "10.41.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:b80:ad00::/56" - } + "12:7e:c9:93:29:af": { + "device_number": "0", + "interface_id": "eni-0c07a0474339b801d", + "ipv4_associations": { + "3.89.187.177": "172.31.81.43" + }, + "local_hostname": "ip-172-31-81-43.ec2.internal", + "local_ipv4s": "172.31.81.43", + "mac": "12:7e:c9:93:29:af", + "owner_id": "329910648901", + "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com", + "public_ipv4s": "3.89.187.177", + "security_group_ids": "sg-0100038b68aa79986", + "security_groups": "launch-wizard-3", + "subnet_id": "subnet-04e2d12a", + "subnet_ipv4_cidr_block": "172.31.80.0/20", + "vpc_id": "vpc-210b4b5b", + "vpc_ipv4_cidr_block": "172.31.0.0/16", + "vpc_ipv4_cidr_blocks": "172.31.0.0/16" + } } } }, "placement": { - "availability-zone": "us-east-2b" + "availability_zone": "us-east-1b" }, "profile": "default-hvm", - "public-hostname": "ec2-18-218-221-122.us-east-2.compute.amazonaws.com", - "public-ipv4": "18.218.221.122", - "public-keys": { - "cloud-init-integration": [ - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration" - ] - }, - "reservation-id": "r-0594a20e31f6cfe46", - "security-groups": "Cloud-init integration test secgroup", + "public_hostname": "ec2-3-89-187-177.compute-1.amazonaws.com", + "public_ipv4": "3.89.187.177", + "reservation_id": "r-0c481643d15766a02", + "security_groups": "launch-wizard-3", "services": { "domain": "amazonaws.com", "partition": "aws" } } }, + "instance_id": "i-0929128ff2f73a2f1", + "kernel_release": "5.3.0-1010-aws", + "local_hostname": "ip-172-31-81-43", + "machine": "x86_64", + "platform": "ec2", + "public_ssh_keys": [], + "python_version": "3.7.6", + "region": "us-east-1", "sensitive_keys": [], + "subplatform": "metadata (http://169.254.169.254)", + "sys_info": { + "dist": [ + "ubuntu", + "20.04", + "focal" + ], + "platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal", + "python": "3.7.6", + "release": "5.3.0-1010-aws", + "system": "Linux", + "uname": [ + "Linux", + "ip-172-31-81-43", + "5.3.0-1010-aws", + "#11-Ubuntu SMP Thu Jan 16 07:59:32 UTC 2020", + "x86_64", + "x86_64" + ], + "variant": "ubuntu" + }, + "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal", + "userdata": "#cloud-config\nssh_import_id: []\n...", "v1": { "_beta_keys": [ "subplatform" ], - "availability-zone": "us-east-2b", - "availability_zone": "us-east-2b", + "availability_zone": "us-east-1b", "cloud_name": "aws", - "instance_id": "i-075f088c72ad3271c", - "local_hostname": "ip-10-41-41-95", + "distro": "ubuntu", + "distro_release": "focal", + "distro_version": "20.04", + "instance_id": "i-0929128ff2f73a2f1", + "kernel": "5.3.0-1010-aws", + "local_hostname": "ip-172-31-81-43", + "machine": "x86_64", "platform": "ec2", - "public_ssh_keys": [ - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDSL7uWGj8cgWyIOaspgKdVy0cKJ+UTjfv7jBOjG2H/GN8bJVXy72XAvnhM0dUM+CCs8FOf0YlPX+Frvz2hKInrmRhZVwRSL129PasD12MlI3l44u6IwS1o/W86Q+tkQYEljtqDOo0a+cOsaZkvUNzUyEXUwz/lmYa6G4hMKZH4NBj7nbAAF96wsMCoyNwbWryBnDYUr6wMbjRR1J9Pw7Xh7WRC73wy4Va2YuOgbD3V/5ZrFPLbWZW/7TFXVrql04QVbyei4aiFR5n//GvoqwQDNe58LmbzX/xvxyKJYdny2zXmdAhMxbrpFQsfpkJ9E/H5w0yOdSvnWbUoG5xNGoOB cloud-init-integration" - ], - "region": "us-east-2", - "subplatform": "metadata (http://169.254.169.254)" - } + "public_ssh_keys": [], + "python": "3.7.6", + "region": "us-east-1", + "subplatform": "metadata (http://169.254.169.254)", + "system_platform": "Linux-5.3.0-1010-aws-x86_64-with-Ubuntu-20.04-focal", + "variant": "ubuntu" + }, + "variant": "ubuntu", + "vendordata": "" } diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index fd12d87b..5976e234 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -172,9 +172,7 @@ class CloudTestCase(unittest2.TestCase): 'Skipping instance-data.json test.' ' OS: %s not bionic or newer' % self.os_name) instance_data = json.loads(out) - self.assertItemsEqual( - [], - instance_data['base64_encoded_keys']) + self.assertItemsEqual(['ci_cfg'], instance_data['sensitive_keys']) ds = instance_data.get('ds', {}) v1_data = instance_data.get('v1', {}) metadata = ds.get('meta-data', {}) @@ -201,6 +199,23 @@ class CloudTestCase(unittest2.TestCase): self.assertIn('i-', v1_data['instance_id']) self.assertIn('ip-', v1_data['local_hostname']) self.assertIsNotNone(v1_data['region'], 'expected ec2 region') + self.assertIsNotNone( + re.match(r'\d\.\d+\.\d+-\d+-aws', v1_data['kernel_release'])) + self.assertEqual( + 'redacted for non-root user', instance_data['merged_cfg']) + self.assertEqual(self.os_cfg['os'], v1_data['variant']) + self.assertEqual(self.os_cfg['os'], v1_data['distro']) + self.assertEqual( + self.os_cfg['os'], instance_data["sys_info"]['dist'][0], + "Unexpected sys_info dist value") + self.assertEqual(self.os_name, v1_data['distro_release']) + self.assertEqual( + str(self.os_cfg['version']), v1_data['distro_version']) + self.assertEqual('x86_64', v1_data['machine']) + self.assertIsNotNone( + re.match(r'3.\d\.\d', v1_data['python_version']), + "unexpected python version: {ver}".format( + ver=v1_data["python_version"])) def test_instance_data_json_lxd(self): """Validate instance-data.json content by lxd platform. @@ -237,6 +252,23 @@ class CloudTestCase(unittest2.TestCase): self.assertIsNone( v1_data['region'], 'found unexpected lxd region %s' % v1_data['region']) + self.assertIsNotNone( + re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release'])) + self.assertEqual( + 'redacted for non-root user', instance_data['merged_cfg']) + self.assertEqual(self.os_cfg['os'], v1_data['variant']) + self.assertEqual(self.os_cfg['os'], v1_data['distro']) + self.assertEqual( + self.os_cfg['os'], instance_data["sys_info"]['dist'][0], + "Unexpected sys_info dist value") + self.assertEqual(self.os_name, v1_data['distro_release']) + self.assertEqual( + str(self.os_cfg['version']), v1_data['distro_version']) + self.assertEqual('x86_64', v1_data['machine']) + self.assertIsNotNone( + re.match(r'3.\d\.\d', v1_data['python_version']), + "unexpected python version: {ver}".format( + ver=v1_data["python_version"])) def test_instance_data_json_kvm(self): """Validate instance-data.json content by nocloud-kvm platform. 
@@ -278,6 +310,23 @@ class CloudTestCase(unittest2.TestCase): self.assertIsNone( v1_data['region'], 'found unexpected lxd region %s' % v1_data['region']) + self.assertIsNotNone( + re.match(r'\d\.\d+\.\d+-\d+', v1_data['kernel_release'])) + self.assertEqual( + 'redacted for non-root user', instance_data['merged_cfg']) + self.assertEqual(self.os_cfg['os'], v1_data['variant']) + self.assertEqual(self.os_cfg['os'], v1_data['distro']) + self.assertEqual( + self.os_cfg['os'], instance_data["sys_info"]['dist'][0], + "Unexpected sys_info dist value") + self.assertEqual(self.os_name, v1_data['distro_release']) + self.assertEqual( + str(self.os_cfg['version']), v1_data['distro_version']) + self.assertEqual('x86_64', v1_data['machine']) + self.assertIsNotNone( + re.match(r'3.\d\.\d', v1_data['python_version']), + "unexpected python version: {ver}".format( + ver=v1_data["python_version"])) class PasswordListTest(CloudTestCase): diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index d62d542b..7aa3b1d1 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -3,6 +3,7 @@ import copy from cloudinit.cs_utils import Cepko +from cloudinit import distros from cloudinit import helpers from cloudinit import sources from cloudinit.sources import DataSourceCloudSigma @@ -47,8 +48,11 @@ class DataSourceCloudSigmaTest(test_helpers.CiTestCase): self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) self.add_patch(DS_PATH + '.is_running_in_cloudsigma', "m_is_container", return_value=True) + + distro_cls = distros.fetch("ubuntu") + distro = distro_cls("ubuntu", cfg={}, paths=self.paths) self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( - "", "", paths=self.paths) + sys_cfg={}, distro=distro, paths=self.paths) self.datasource.cepko = CepkoMock(SERVER_CONTEXT) def test_get_hostname(self): -- cgit v1.2.3 From 94838def772349387e16cc642b3642020e22deda Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Thu, 12 Mar 2020 14:37:08 -0400 Subject: Add Netbsd support (#62) Add support for the NetBSD Operating System. Features in this branch: * Add BSD distro parent class from which NetBSD and FreeBSD can specialize * Add *bsd util functions to cloudinit.net and cloudinit.net.bsd_utils * subclass cloudinit.distro.freebsd.Distro from bsd.Distro * Add new cloudinit.distro.netbsd and cloudinit.net.renderer for netbsd * Add lru_cached util.is_NetBSD functions * Add NetBSD detection for ConfigDrive and NoCloud datasources This branch has been tested with: - NoCloud and OpenStack (with and without config-drive) - NetBSD 8.1. and 9.0 - FreeBSD 11.2 and 12.1 - Python 3.7 only, because of the dependency oncrypt.METHOD_BLOWFISH. 
This version is available in NetBSD 7, 8 and 9 anyway --- cloudinit/distros/bsd.py | 111 ++++++++++++++++ cloudinit/distros/bsd_utils.py | 50 ++++++++ cloudinit/distros/freebsd.py | 133 +++---------------- cloudinit/distros/netbsd.py | 133 +++++++++++++++++++ cloudinit/net/__init__.py | 27 ++++ cloudinit/net/bsd.py | 165 ++++++++++++++++++++++++ cloudinit/net/freebsd.py | 169 ++++--------------------- cloudinit/net/netbsd.py | 42 ++++++ cloudinit/net/renderers.py | 4 +- cloudinit/netinfo.py | 52 +++++++- cloudinit/sources/DataSourceConfigDrive.py | 8 +- cloudinit/sources/DataSourceNoCloud.py | 8 ++ cloudinit/tests/helpers.py | 1 + cloudinit/tests/test_util.py | 34 +++-- cloudinit/util.py | 33 ++++- config/cloud.cfg.tmpl | 30 ++++- doc/rtd/topics/network-config.rst | 2 +- setup.py | 4 +- sysvinit/netbsd/cloudconfig | 17 +++ sysvinit/netbsd/cloudfinal | 16 +++ sysvinit/netbsd/cloudinit | 16 +++ sysvinit/netbsd/cloudinitlocal | 18 +++ tests/unittests/test_distros/test_bsd_utils.py | 66 ++++++++++ tools/build-on-netbsd | 37 ++++++ tools/render-cloudcfg | 4 +- 25 files changed, 884 insertions(+), 296 deletions(-) create mode 100644 cloudinit/distros/bsd.py create mode 100644 cloudinit/distros/bsd_utils.py create mode 100644 cloudinit/distros/netbsd.py create mode 100644 cloudinit/net/bsd.py create mode 100644 cloudinit/net/netbsd.py create mode 100755 sysvinit/netbsd/cloudconfig create mode 100755 sysvinit/netbsd/cloudfinal create mode 100755 sysvinit/netbsd/cloudinit create mode 100755 sysvinit/netbsd/cloudinitlocal create mode 100644 tests/unittests/test_distros/test_bsd_utils.py create mode 100755 tools/build-on-netbsd (limited to 'doc/rtd') diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py new file mode 100644 index 00000000..e9b84edc --- /dev/null +++ b/cloudinit/distros/bsd.py @@ -0,0 +1,111 @@ +import platform + +from cloudinit import distros +from cloudinit.distros import bsd_utils +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import net +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +class BSD(distros.Distro): + hostname_conf_fn = '/etc/rc.conf' + rc_conf_fn = "/etc/rc.conf" + + # Set in BSD distro subclasses + group_add_cmd_prefix = [] + pkg_cmd_install_prefix = [] + pkg_cmd_remove_prefix = [] + + def __init__(self, name, cfg, paths): + super().__init__(name, cfg, paths) + # This will be used to restrict certain + # calls from repeatly happening (when they + # should only happen say once per instance...) 
+ self._runner = helpers.Runners(paths) + cfg['ssh_svcname'] = 'sshd' + self.osfamily = platform.system().lower() + + def _read_system_hostname(self): + sys_hostname = self._read_hostname(self.hostname_conf_fn) + return (self.hostname_conf_fn, sys_hostname) + + def _read_hostname(self, filename, default=None): + return bsd_utils.get_rc_config_value('hostname') + + def _get_add_member_to_group_cmd(self, member_name, group_name): + raise NotImplementedError('Return list cmd to add member to group') + + def _write_hostname(self, hostname, filename): + bsd_utils.set_rc_config_value('hostname', hostname, fn='/etc/rc.conf') + + def create_group(self, name, members=None): + if util.is_group(name): + LOG.warning("Skipping creation of existing group '%s'", name) + else: + group_add_cmd = self.group_add_cmd_prefix + [name] + try: + util.subp(group_add_cmd) + LOG.info("Created new group %s", name) + except Exception: + util.logexc(LOG, "Failed to create group %s", name) + + if not members: + members = [] + for member in members: + if not util.is_user(member): + LOG.warning("Unable to add group member '%s' to group '%s'" + "; user does not exist.", member, name) + continue + try: + util.subp(self._get_add_member_to_group_cmd(member, name)) + LOG.info("Added user '%s' to group '%s'", member, name) + except Exception: + util.logexc(LOG, "Failed to add user '%s' to group '%s'", + member, name) + + def generate_fallback_config(self): + nconf = {'config': [], 'version': 1} + for mac, name in net.get_interfaces_by_mac().items(): + nconf['config'].append( + {'type': 'physical', 'name': name, + 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]}) + return nconf + + def install_packages(self, pkglist): + self.update_package_sources() + self.package_command('install', pkgs=pkglist) + + def _get_pkg_cmd_environ(self): + """Return environment vars used in *BSD package_command operations""" + raise NotImplementedError('BSD subclasses return a dict of env vars') + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + # TODO neither freebsd nor netbsd handles a command 'upgrade' + # provided by cloudinit/config/cc_package_update_upgrade_install.py + if command == 'install': + cmd = self.pkg_cmd_install_prefix + elif command == 'remove': + cmd = self.pkg_cmd_remove_prefix + + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + pkglist = util.expand_package_list('%s-%s', pkgs) + cmd.extend(pkglist) + + # Allow the output of this to flow outwards (ie not be captured) + util.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False) + + def _write_network_config(self, netconfig): + return self._supported_write_network_config(netconfig) + + def set_timezone(self, tz): + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) diff --git a/cloudinit/distros/bsd_utils.py b/cloudinit/distros/bsd_utils.py new file mode 100644 index 00000000..079d0d53 --- /dev/null +++ b/cloudinit/distros/bsd_utils.py @@ -0,0 +1,50 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import shlex + +from cloudinit import util + +# On NetBSD, /etc/rc.conf comes with a if block: +# if [ -r /etc/defaults/rc.conf ]; then +# as a consequence, the file is not a regular key/value list +# anymore and we cannot use cloudinit.distros.parsers.sys_conf +# The module comes with a more naive parser, but is able to +# preserve these if blocks. 
+ + +def _unquote(value): + if value[0] == value[-1] and value[0] in ['"', "'"]: + return value[1:-1] + return value + + +def get_rc_config_value(key, fn='/etc/rc.conf'): + key_prefix = '{}='.format(key) + for line in util.load_file(fn).splitlines(): + if line.startswith(key_prefix): + value = line.replace(key_prefix, '') + return _unquote(value) + + +def set_rc_config_value(key, value, fn='/etc/rc.conf'): + lines = [] + done = False + value = shlex.quote(value) + original_content = util.load_file(fn) + for line in original_content.splitlines(): + if '=' in line: + k, v = line.split('=', 1) + if k == key: + v = value + done = True + lines.append('='.join([k, v])) + else: + lines.append(line) + if not done: + lines.append('='.join([key, value])) + new_content = '\n'.join(lines) + '\n' + if new_content != original_content: + util.write_file(fn, new_content) + + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 026d1142..a775ae51 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -8,34 +8,22 @@ import os import re from io import StringIO -from cloudinit import distros -from cloudinit import helpers +import cloudinit.distros.bsd from cloudinit import log as logging -from cloudinit import net -from cloudinit import ssh_util from cloudinit import util -from cloudinit.distros import rhel_util from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) -class Distro(distros.Distro): +class Distro(cloudinit.distros.bsd.BSD): usr_lib_exec = '/usr/local/lib' - rc_conf_fn = "/etc/rc.conf" login_conf_fn = '/etc/login.conf' login_conf_fn_bak = '/etc/login.conf.orig' ci_sudoers_fn = '/usr/local/etc/sudoers.d/90-cloud-init-users' - hostname_conf_fn = '/etc/rc.conf' - - def __init__(self, name, cfg, paths): - distros.Distro.__init__(self, name, cfg, paths) - # This will be used to restrict certain - # calls from repeatly happening (when they - # should only happen say once per instance...) - self._runner = helpers.Runners(paths) - self.osfamily = 'freebsd' - cfg['ssh_svcname'] = 'sshd' + group_add_cmd_prefix = ['pw', 'group', 'add'] + pkg_cmd_install_prefix = ["pkg", "install"] + pkg_cmd_remove_prefix = ["pkg", "remove"] def _select_hostname(self, hostname, fqdn): # Should be FQDN if available. 
See rc.conf(5) in FreeBSD @@ -43,45 +31,8 @@ class Distro(distros.Distro): return fqdn return hostname - def _read_system_hostname(self): - sys_hostname = self._read_hostname(self.hostname_conf_fn) - return (self.hostname_conf_fn, sys_hostname) - - def _read_hostname(self, filename, default=None): - (_exists, contents) = rhel_util.read_sysconfig_file(filename) - if contents.get('hostname'): - return contents['hostname'] - else: - return default - - def _write_hostname(self, hostname, filename): - rhel_util.update_sysconfig_file(filename, {'hostname': hostname}) - - def create_group(self, name, members): - group_add_cmd = ['pw', 'group', 'add', name] - if util.is_group(name): - LOG.warning("Skipping creation of existing group '%s'", name) - else: - try: - util.subp(group_add_cmd) - LOG.info("Created new group %s", name) - except Exception: - util.logexc(LOG, "Failed to create group %s", name) - raise - if not members: - members = [] - - for member in members: - if not util.is_user(member): - LOG.warning("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) - continue - try: - util.subp(['pw', 'usermod', '-n', name, '-G', member]) - LOG.info("Added user '%s' to group '%s'", member, name) - except Exception: - util.logexc(LOG, "Failed to add user '%s' to group '%s'", - member, name) + def _get_add_member_to_group_cmd(self, member_name, group_name): + return ['pw', 'usermod', '-n', member_name, '-G', group_name] def add_user(self, name, **kwargs): if util.is_user(name): @@ -162,40 +113,8 @@ class Distro(distros.Distro): util.logexc(LOG, "Failed to lock user %s", name) raise - def create_user(self, name, **kwargs): - self.add_user(name, **kwargs) - - # Set password if plain-text password provided and non-empty - if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']: - self.set_passwd(name, kwargs['plain_text_passwd']) - - # Default locking down the account. 'lock_passwd' defaults to True. - # lock account unless lock_password is False. - if kwargs.get('lock_passwd', True): - self.lock_passwd(name) - - # Configure sudo access - if 'sudo' in kwargs and kwargs['sudo'] is not False: - self.write_sudo_rules(name, kwargs['sudo']) - - # Import SSH keys - if 'ssh_authorized_keys' in kwargs: - keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, options=None) - - def generate_fallback_config(self): - nconf = {'config': [], 'version': 1} - for mac, name in net.get_interfaces_by_mac().items(): - nconf['config'].append( - {'type': 'physical', 'name': name, - 'mac_address': mac, 'subnets': [{'type': 'dhcp'}]}) - return nconf - - def _write_network_config(self, netconfig): - return self._supported_write_network_config(netconfig) - def apply_locale(self, locale, out_fn=None): - # Adjust the locals value to the new value + # Adjust the locales value to the new value newconf = StringIO() for line in util.load_file(self.login_conf_fn).splitlines(): newconf.write(re.sub(r'^default:', @@ -225,39 +144,17 @@ class Distro(distros.Distro): # /etc/rc.conf a line with the following format: # ifconfig_OLDNAME_name=NEWNAME # FreeBSD network script will rename the interface automatically. 
-        return
-
-    def install_packages(self, pkglist):
-        self.update_package_sources()
-        self.package_command('install', pkgs=pkglist)
-
-    def package_command(self, command, args=None, pkgs=None):
-        if pkgs is None:
-            pkgs = []
+        pass
 
+    def _get_pkg_cmd_environ(self):
+        """Return environment vars used in *BSD package_command operations"""
         e = os.environ.copy()
         e['ASSUME_ALWAYS_YES'] = 'YES'
-
-        cmd = ['pkg']
-        if args and isinstance(args, str):
-            cmd.append(args)
-        elif args and isinstance(args, list):
-            cmd.extend(args)
-
-        if command:
-            cmd.append(command)
-
-        pkglist = util.expand_package_list('%s-%s', pkgs)
-        cmd.extend(pkglist)
-
-        # Allow the output of this to flow outwards (ie not be captured)
-        util.subp(cmd, env=e, capture=False)
-
-    def set_timezone(self, tz):
-        distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
+        return e
 
     def update_package_sources(self):
-        self._runner.run("update-sources", self.package_command,
-                         ["update"], freq=PER_INSTANCE)
+        self._runner.run(
+            "update-sources", self.package_command,
+            ["update"], freq=PER_INSTANCE)
 
 # vi: ts=4 expandtab
diff --git a/cloudinit/distros/netbsd.py b/cloudinit/distros/netbsd.py
new file mode 100644
index 00000000..353eb671
--- /dev/null
+++ b/cloudinit/distros/netbsd.py
@@ -0,0 +1,133 @@
+# Copyright (C) 2019-2020 Gonéri Le Bouder
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import crypt
+import os
+import platform
+import six
+
+import cloudinit.distros.bsd
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(cloudinit.distros.bsd.BSD):
+    ci_sudoers_fn = '/usr/pkg/etc/sudoers.d/90-cloud-init-users'
+
+    group_add_cmd_prefix = ["groupadd"]
+    pkg_cmd_install_prefix = ["pkg_add", "-U"]
+    pkg_cmd_remove_prefix = ['pkg_delete']
+
+    def _get_add_member_to_group_cmd(self, member_name, group_name):
+        return ['usermod', '-G', group_name, member_name]
+
+    def add_user(self, name, **kwargs):
+        if util.is_user(name):
+            LOG.info("User %s already exists, skipping.", name)
+            return False
+
+        adduser_cmd = ['useradd']
+        log_adduser_cmd = ['useradd']
+
+        adduser_opts = {
+            "homedir": '-d',
+            "gecos": '-c',
+            "primary_group": '-g',
+            "groups": '-G',
+            "shell": '-s',
+        }
+        adduser_flags = {
+            "no_user_group": '--no-user-group',
+            "system": '--system',
+            "no_log_init": '--no-log-init',
+        }
+
+        for key, val in kwargs.items():
+            if (key in adduser_opts and val and
+                    isinstance(val, six.string_types)):
+                adduser_cmd.extend([adduser_opts[key], val])
+
+            elif key in adduser_flags and val:
+                adduser_cmd.append(adduser_flags[key])
+                log_adduser_cmd.append(adduser_flags[key])
+
+        if 'no_create_home' not in kwargs or 'system' not in kwargs:
+            adduser_cmd += ['-m']
+            log_adduser_cmd += ['-m']
+
+        adduser_cmd += [name]
+        log_adduser_cmd += [name]
+
+        # Run the command
+        LOG.info("Adding user %s", name)
+        try:
+            util.subp(adduser_cmd, logstring=log_adduser_cmd)
+        except Exception:
+            util.logexc(LOG, "Failed to create user %s", name)
+            raise
+        # Set the password if it is provided
+        # For security consideration, only hashed passwd is assumed
+        passwd_val = kwargs.get('passwd', None)
+        if passwd_val is not None:
+            self.set_passwd(name, passwd_val, hashed=True)
+
+    def set_passwd(self, user, passwd, hashed=False):
+        if hashed:
+            hashed_pw = passwd
+        elif not hasattr(crypt, 'METHOD_BLOWFISH'):
+            # crypt.METHOD_BLOWFISH comes with Python 3.7 which is available
+            # on NetBSD 7 and 8.
+            LOG.error((
+                'Cannot set non-encrypted password for user %s. '
+                'Python >= 3.7 is required.'), user)
+            return
+        else:
+            method = crypt.METHOD_BLOWFISH  # pylint: disable=E1101
+            hashed_pw = crypt.crypt(
+                    passwd,
+                    crypt.mksalt(method))
+
+        try:
+            util.subp(['usermod', '-C', 'no', '-p', hashed_pw, user])
+        except Exception:
+            util.logexc(LOG, "Failed to set password for %s", user)
+            raise
+
+    def force_passwd_change(self, user):
+        try:
+            util.subp(['usermod', '-F', user])
+        except Exception:
+            util.logexc(LOG, "Failed to set pw expiration for %s", user)
+            raise
+
+    def lock_passwd(self, name):
+        try:
+            util.subp(['usermod', '-C', 'yes', name])
+        except Exception:
+            util.logexc(LOG, "Failed to lock user %s", name)
+            raise
+
+    def apply_locale(self, locale, out_fn=None):
+        LOG.debug('Cannot set the locale.')
+
+    def apply_network_config_names(self, netconfig):
+        LOG.debug('NetBSD cannot rename network interface.')
+
+    def _get_pkg_cmd_environ(self):
+        """Return environment vars used in *BSD package_command operations"""
+        os_release = platform.release()
+        os_arch = platform.machine()
+        e = os.environ.copy()
+        e['PKG_PATH'] = (
+                'http://cdn.netbsd.org/pub/pkgsrc/'
+                'packages/NetBSD/%s/%s/All') % (os_arch, os_release)
+        return e
+
+    def update_package_sources(self):
+        pass
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index 1d5eb535..400d7870 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -334,10 +334,20 @@ def find_fallback_nic(blacklist_drivers=None):
     """Return the name of the 'fallback' network device."""
     if util.is_FreeBSD():
         return find_fallback_nic_on_freebsd(blacklist_drivers)
+    elif util.is_NetBSD():
+        return find_fallback_nic_on_netbsd(blacklist_drivers)
     else:
         return find_fallback_nic_on_linux(blacklist_drivers)
 
 
+def find_fallback_nic_on_netbsd(blacklist_drivers=None):
+    values = list(sorted(
+        get_interfaces_by_mac().values(),
+        key=natural_sort_key))
+    if values:
+        return values[0]
+
+
 def find_fallback_nic_on_freebsd(blacklist_drivers=None):
     """Return the name of the 'fallback' network device on FreeBSD.
 
@@ -799,6 +809,8 @@ def get_ib_interface_hwaddr(ifname, ethernet_format):
 def get_interfaces_by_mac():
     if util.is_FreeBSD():
         return get_interfaces_by_mac_on_freebsd()
+    elif util.is_NetBSD():
+        return get_interfaces_by_mac_on_netbsd()
     else:
         return get_interfaces_by_mac_on_linux()
 
@@ -830,6 +842,21 @@ def get_interfaces_by_mac_on_freebsd():
     return results
 
 
+def get_interfaces_by_mac_on_netbsd():
+    ret = {}
+    re_field_match = (
+        r"(?P<ifname>\w+).*address:\s"
+        r"(?P<mac>([\da-f]{2}[:-]){5}([\da-f]{2})).*")
+    (out, _) = util.subp(['ifconfig', '-a'])
+    if_lines = re.sub(r'\n\s+', ' ', out).splitlines()
+    for line in if_lines:
+        m = re.match(re_field_match, line)
+        if m:
+            fields = m.groupdict()
+            ret[fields['mac']] = fields['ifname']
+    return ret
+
+
 def get_interfaces_by_mac_on_linux():
     """Build a dictionary of tuples {mac: name}.
 
diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py
new file mode 100644
index 00000000..fb714d4c
--- /dev/null
+++ b/cloudinit/net/bsd.py
@@ -0,0 +1,165 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import re
+
+from cloudinit import log as logging
+from cloudinit import net
+from cloudinit import util
+from cloudinit.distros.parsers.resolv_conf import ResolvConf
+from cloudinit.distros import bsd_utils
+
+from . import renderer
+
+LOG = logging.getLogger(__name__)
+
+
+class BSDRenderer(renderer.Renderer):
+    resolv_conf_fn = 'etc/resolv.conf'
+    rc_conf_fn = 'etc/rc.conf'
+
+    def get_rc_config_value(self, key):
+        fn = util.target_path(self.target, self.rc_conf_fn)
+        bsd_utils.get_rc_config_value(key, fn=fn)
+
+    def set_rc_config_value(self, key, value):
+        fn = util.target_path(self.target, self.rc_conf_fn)
+        bsd_utils.set_rc_config_value(key, value, fn=fn)
+
+    def __init__(self, config=None):
+        if not config:
+            config = {}
+        self.target = None
+        self.interface_configurations = {}
+        self._postcmds = config.get('postcmds', True)
+
+    def _ifconfig_entries(self, settings, target=None):
+        ifname_by_mac = net.get_interfaces_by_mac()
+        for interface in settings.iter_interfaces():
+            device_name = interface.get("name")
+            device_mac = interface.get("mac_address")
+            if device_name and re.match(r'^lo\d+$', device_name):
+                continue
+            if device_mac not in ifname_by_mac:
+                LOG.info('Cannot find any device with MAC %s', device_mac)
+            elif device_mac and device_name:
+                cur_name = ifname_by_mac[device_mac]
+                if cur_name != device_name:
+                    LOG.info('netif service will rename interface %s to %s',
+                             cur_name, device_name)
+                    try:
+                        self.rename_interface(cur_name, device_name)
+                    except NotImplementedError:
+                        LOG.error((
+                            'Interface renaming is '
+                            'not supported on this OS'))
+                        device_name = cur_name
+
+            else:
+                device_name = ifname_by_mac[device_mac]
+
+            LOG.info('Configuring interface %s', device_name)
+
+            self.interface_configurations[device_name] = 'DHCP'
+
+            for subnet in interface.get("subnets", []):
+                if subnet.get('type') == 'static':
+                    if not subnet.get('netmask'):
+                        LOG.debug(
+                            'Skipping IP %s, because there is no netmask',
+                            subnet.get('address'))
+                        continue
+                    LOG.debug('Configuring dev %s with %s / %s', device_name,
+                              subnet.get('address'), subnet.get('netmask'))
+
+                    self.interface_configurations[device_name] = {
+                        'address': subnet.get('address'),
+                        'netmask': subnet.get('netmask'),
+                    }
+
+    def _route_entries(self, settings, target=None):
+        routes = list(settings.iter_routes())
+        for interface in settings.iter_interfaces():
+            subnets = interface.get("subnets", [])
+            for subnet in subnets:
+                if subnet.get('type') != 'static':
+                    continue
+                gateway = subnet.get('gateway')
+                if gateway and len(gateway.split('.')) == 4:
+                    routes.append({
+                        'network': '0.0.0.0',
+                        'netmask': '0.0.0.0',
+                        'gateway': gateway})
+                routes += subnet.get('routes', [])
+        for route in routes:
+            network = route.get('network')
+            if not network:
+                LOG.debug('Skipping a bad route entry')
+                continue
+            netmask = route.get('netmask')
+            gateway = route.get('gateway')
+            self.set_route(network, netmask, gateway)
+
+    def _resolve_conf(self, settings, target=None):
+        nameservers = settings.dns_nameservers
+        searchdomains = settings.dns_searchdomains
+        for interface in settings.iter_interfaces():
+            for subnet in interface.get("subnets", []):
+                if 'dns_nameservers' in subnet:
+                    nameservers.extend(subnet['dns_nameservers'])
+                if 'dns_search' in subnet:
+                    searchdomains.extend(subnet['dns_search'])
+        # Try to read the /etc/resolv.conf or just start from scratch if that
+        # fails.
+        try:
+            resolvconf = ResolvConf(util.load_file(util.target_path(
+                target, self.resolv_conf_fn)))
+            resolvconf.parse()
+        except IOError:
+            util.logexc(LOG, "Failed to parse %s, use new empty file",
+                        util.target_path(target, self.resolv_conf_fn))
+            resolvconf = ResolvConf('')
+            resolvconf.parse()
+
+        # Add some nameservers
+        for server in nameservers:
+            try:
+                resolvconf.add_nameserver(server)
+            except ValueError:
+                util.logexc(LOG, "Failed to add nameserver %s", server)
+
+        # And add any searchdomains.
+        for domain in searchdomains:
+            try:
+                resolvconf.add_search_domain(domain)
+            except ValueError:
+                util.logexc(LOG, "Failed to add search domain %s", domain)
+        util.write_file(
+            util.target_path(target, self.resolv_conf_fn),
+            str(resolvconf), 0o644)
+
+    def render_network_state(self, network_state, templates=None, target=None):
+        self._ifconfig_entries(settings=network_state)
+        self._route_entries(settings=network_state)
+        self._resolve_conf(settings=network_state)
+
+        self.write_config()
+        self.start_services(run=self._postcmds)
+
+    def dhcp_interfaces(self):
+        ic = self.interface_configurations.items
+        return [k for k, v in ic() if v == 'DHCP']
+
+    def start_services(self, run=False):
+        raise NotImplementedError()
+
+    def write_config(self, target=None):
+        raise NotImplementedError()
+
+    def set_gateway(self, gateway):
+        raise NotImplementedError()
+
+    def rename_interface(self, cur_name, device_name):
+        raise NotImplementedError()
+
+    def set_route(self, network, netmask, gateway):
+        raise NotImplementedError()
diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py
index d6f61da3..60f05bb2 100644
--- a/cloudinit/net/freebsd.py
+++ b/cloudinit/net/freebsd.py
@@ -1,156 +1,29 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
-import re
-
 from cloudinit import log as logging
-from cloudinit import net
+import cloudinit.net.bsd
 from cloudinit import util
-from cloudinit.distros import rhel_util
-from cloudinit.distros.parsers.resolv_conf import ResolvConf
-
-from . import renderer
 
 LOG = logging.getLogger(__name__)
 
 
-class Renderer(renderer.Renderer):
-    resolv_conf_fn = 'etc/resolv.conf'
-    rc_conf_fn = 'etc/rc.conf'
+class Renderer(cloudinit.net.bsd.BSDRenderer):
 
     def __init__(self, config=None):
-        if not config:
-            config = {}
-        self.dhcp_interfaces = []
-        self._postcmds = config.get('postcmds', True)
-
-    def _update_rc_conf(self, settings, target=None):
-        fn = util.target_path(target, self.rc_conf_fn)
-        rhel_util.update_sysconfig_file(fn, settings)
-
-    def _write_ifconfig_entries(self, settings, target=None):
-        ifname_by_mac = net.get_interfaces_by_mac()
-        for interface in settings.iter_interfaces():
-            device_name = interface.get("name")
-            device_mac = interface.get("mac_address")
-            if device_name and re.match(r'^lo\d+$', device_name):
-                continue
-            if device_mac not in ifname_by_mac:
-                LOG.info('Cannot find any device with MAC %s', device_mac)
-            elif device_mac and device_name:
-                cur_name = ifname_by_mac[device_mac]
-                if cur_name != device_name:
-                    LOG.info('netif service will rename interface %s to %s',
-                             cur_name, device_name)
-                    self._update_rc_conf(
-                        {'ifconfig_%s_name' % cur_name: device_name},
-                        target=target)
-            else:
-                device_name = ifname_by_mac[device_mac]
-
-            LOG.info('Configuring interface %s', device_name)
-            ifconfig = 'DHCP'  # default
-
-            for subnet in interface.get("subnets", []):
-                if ifconfig != 'DHCP':
-                    LOG.info('The FreeBSD provider only set the first subnet.')
-                    break
-                if subnet.get('type') == 'static':
-                    if not subnet.get('netmask'):
-                        LOG.debug(
-                            'Skipping IP %s, because there is no netmask',
-                            subnet.get('address'))
-                        continue
-                    LOG.debug('Configuring dev %s with %s / %s', device_name,
-                              subnet.get('address'), subnet.get('netmask'))
-                    # Configure an ipv4 address.
-                    ifconfig = (
-                        subnet.get('address') + ' netmask ' +
-                        subnet.get('netmask'))
-
-            if ifconfig == 'DHCP':
-                self.dhcp_interfaces.append(device_name)
-            self._update_rc_conf(
-                {'ifconfig_' + device_name: ifconfig},
-                target=target)
-
-    def _write_route_entries(self, settings, target=None):
-        routes = list(settings.iter_routes())
-        for interface in settings.iter_interfaces():
-            subnets = interface.get("subnets", [])
-            for subnet in subnets:
-                if subnet.get('type') != 'static':
-                    continue
-                gateway = subnet.get('gateway')
-                if gateway and len(gateway.split('.')) == 4:
-                    routes.append({
-                        'network': '0.0.0.0',
-                        'netmask': '0.0.0.0',
-                        'gateway': gateway})
-                routes += subnet.get('routes', [])
-        route_cpt = 0
-        for route in routes:
-            network = route.get('network')
-            if not network:
-                LOG.debug('Skipping a bad route entry')
-                continue
-            netmask = route.get('netmask')
-            gateway = route.get('gateway')
-            route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
-            if network == '0.0.0.0':
-                self._update_rc_conf(
-                    {'defaultrouter': gateway}, target=target)
+        self._route_cpt = 0
+        super(Renderer, self).__init__()
+
+    def rename_interface(self, cur_name, device_name):
+        self.set_rc_config_value('ifconfig_%s_name' % cur_name, device_name)
+
+    def write_config(self):
+        for device_name, v in self.interface_configurations.items():
+            if isinstance(v, dict):
+                self.set_rc_config_value(
+                    'ifconfig_' + device_name,
+                    v.get('address') + ' netmask ' + v.get('netmask'))
             else:
-                self._update_rc_conf(
-                    {'route_net%d' % route_cpt: route_cmd}, target=target)
-            route_cpt += 1
-
-    def _write_resolve_conf(self, settings, target=None):
-        nameservers = settings.dns_nameservers
-        searchdomains = settings.dns_searchdomains
-        for interface in settings.iter_interfaces():
-            for subnet in interface.get("subnets", []):
-                if 'dns_nameservers' in subnet:
-                    nameservers.extend(subnet['dns_nameservers'])
-                if 'dns_search' in subnet:
-                    searchdomains.extend(subnet['dns_search'])
-        # Try to read the /etc/resolv.conf or just start from scratch if that
-        # fails.
-        try:
-            resolvconf = ResolvConf(util.load_file(util.target_path(
-                target, self.resolv_conf_fn)))
-            resolvconf.parse()
-        except IOError:
-            util.logexc(LOG, "Failed to parse %s, use new empty file",
-                        util.target_path(target, self.resolv_conf_fn))
-            resolvconf = ResolvConf('')
-            resolvconf.parse()
-
-        # Add some nameservers
-        for server in nameservers:
-            try:
-                resolvconf.add_nameserver(server)
-            except ValueError:
-                util.logexc(LOG, "Failed to add nameserver %s", server)
-
-        # And add any searchdomains.
-        for domain in searchdomains:
-            try:
-                resolvconf.add_search_domain(domain)
-            except ValueError:
-                util.logexc(LOG, "Failed to add search domain %s", domain)
-        util.write_file(
-            util.target_path(target, self.resolv_conf_fn),
-            str(resolvconf), 0o644)
-
-    def _write_network(self, settings, target=None):
-        self._write_ifconfig_entries(settings, target=target)
-        self._write_route_entries(settings, target=target)
-        self._write_resolve_conf(settings, target=target)
-
-        self.start_services(run=self._postcmds)
-
-    def render_network_state(self, network_state, templates=None, target=None):
-        self._write_network(network_state, target=target)
+                self.set_rc_config_value('ifconfig_' + device_name, 'DHCP')
 
     def start_services(self, run=False):
         if not run:
@@ -165,11 +38,21 @@ class Renderer(renderer.Renderer):
         # - dhclient: it cannot stop the dhclient started by the netif service.
         # In both case, the situation is ok, and we can proceed.
         util.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1])
-        for dhcp_interface in self.dhcp_interfaces:
+
+        for dhcp_interface in self.dhcp_interfaces():
             util.subp(['service', 'dhclient', 'restart', dhcp_interface],
                       rcs=[0, 1], capture=True)
 
+    def set_route(self, network, netmask, gateway):
+        if network == '0.0.0.0':
+            self.set_rc_config_value('defaultrouter', gateway)
+        else:
+            route_name = 'route_net%d' % self._route_cpt
+            route_cmd = "-route %s/%s %s" % (network, netmask, gateway)
+            self.set_rc_config_value(route_name, route_cmd)
+            self._route_cpt += 1
+
 
 def available(target=None):
     return util.is_FreeBSD()
diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py
new file mode 100644
index 00000000..9cc8ef31
--- /dev/null
+++ b/cloudinit/net/netbsd.py
@@ -0,0 +1,42 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import log as logging
+from cloudinit import util
+import cloudinit.net.bsd
+
+LOG = logging.getLogger(__name__)
+
+
+class Renderer(cloudinit.net.bsd.BSDRenderer):
+
+    def __init__(self, config=None):
+        super(Renderer, self).__init__()
+
+    def write_config(self):
+        if self.dhcp_interfaces():
+            self.set_rc_config_value('dhcpcd', 'YES')
+            self.set_rc_config_value(
+                'dhcpcd_flags',
+                ' '.join(self.dhcp_interfaces()))
+        for device_name, v in self.interface_configurations.items():
+            if isinstance(v, dict):
+                self.set_rc_config_value(
+                    'ifconfig_' + device_name,
+                    v.get('address') + ' netmask ' + v.get('netmask'))
+
+    def start_services(self, run=False):
+        if not run:
+            LOG.debug("netbsd generate postcmd disabled")
+            return
+
+        util.subp(['service', 'network', 'restart'], capture=True)
+        if self.dhcp_interfaces():
+            util.subp(['service', 'dhcpcd', 'restart'], capture=True)
+
+    def set_route(self, network, netmask, gateway):
+        if network == '0.0.0.0':
+            self.set_rc_config_value('defaultroute', gateway)
+
+
+def available(target=None):
+    return util.is_NetBSD()
diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py
index b98dbbe3..e4bcae9d 100644
--- a/cloudinit/net/renderers.py
+++ b/cloudinit/net/renderers.py
@@ -2,6 +2,7 @@
 
 from . import eni
 from . import freebsd
+from . import netbsd
 from . import netplan
 from . import RendererNotFoundError
 from . import sysconfig
@@ -9,11 +10,12 @@ from . import sysconfig
 NAME_TO_RENDERER = {
     "eni": eni,
     "freebsd": freebsd,
+    "netbsd": netbsd,
     "netplan": netplan,
     "sysconfig": sysconfig,
 }
 
-DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd"]
+DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd", "netbsd"]
 
 
 def search(priority=None, target=None, first=False):
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 6ba21f4d..1001f149 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -91,6 +91,53 @@ def _netdev_info_iproute(ipaddr_out):
     return devs
 
 
+def _netdev_info_ifconfig_netbsd(ifconfig_data):
+    # fields that need to be returned in devs for each dev
+    devs = {}
+    for line in ifconfig_data.splitlines():
+        if len(line) == 0:
+            continue
+        if line[0] not in ("\t", " "):
+            curdev = line.split()[0]
+            # current ifconfig pops a ':' on the end of the device
+            if curdev.endswith(':'):
+                curdev = curdev[:-1]
+            if curdev not in devs:
+                devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
+        toks = line.lower().strip().split()
+        if len(toks) > 1:
+            if re.search(r"flags=[x\d]+<up.*>", toks[1]):
+                devs[curdev]['up'] = True
+
+        for i in range(len(toks)):
+            if toks[i] == "inet":  # Create new ipv4 addr entry
+                network, net_bits = toks[i + 1].split('/')
+                devs[curdev]['ipv4'].append(
+                    {'ip': network, 'mask': net_prefix_to_ipv4_mask(net_bits)})
+            elif toks[i] == "broadcast":
+                devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+            elif toks[i] == "address:":
+                devs[curdev]['hwaddr'] = toks[i + 1]
+            elif toks[i] == "inet6":
+                if toks[i + 1] == "addr:":
+                    devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+                else:
+                    devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+            elif toks[i] == "prefixlen":  # Add prefix to current ipv6 value
+                addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
+                devs[curdev]['ipv6'][-1]['ip'] = addr6
+            elif toks[i].startswith("scope:"):
+                devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+            elif toks[i] == "scopeid":
+                res = re.match(r'.*<(\S+)>', toks[i + 1])
+                if res:
+                    devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+                else:
+                    devs[curdev]['ipv6'][-1]['scope6'] = toks[i + 1]
+
+    return devs
+
+
 def _netdev_info_ifconfig(ifconfig_data):
     # fields that need to be returned in devs for each dev
     devs = {}
@@ -149,7 +196,10 @@ def _netdev_info_ifconfig(ifconfig_data):
 
 def netdev_info(empty=""):
     devs = {}
-    if util.which('ip'):
+    if util.is_NetBSD():
+        (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+        devs = _netdev_info_ifconfig_netbsd(ifcfg_out)
+    elif util.which('ip'):
         # Try iproute first of all
         (ipaddr_out, _err) = util.subp(["ip", "addr", "show"])
         devs = _netdev_info_iproute(ipaddr_out)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index f77923c2..dee8cde4 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -71,11 +71,11 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
         if not found:
             dslist = self.sys_cfg.get('datasource_list')
             for dev in find_candidate_devs(dslist=dslist):
-                try:
-                    if util.is_FreeBSD() and dev.startswith("/dev/cd"):
+                mtype = None
+                if (util.is_FreeBSD() or util.is_NetBSD()):
+                    if dev.startswith("/dev/cd"):
                         mtype = "cd9660"
-                    else:
-                        mtype = None
+                try:
                     results = util.mount_cb(dev, read_config_drive,
                                             mtype=mtype)
                     found = dev
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index ee748b41..2a44128d 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -40,6 +40,14 @@ class DataSourceNoCloud(sources.DataSource):
                 devlist = [
                     p for p in ['/dev/msdosfs/' + label, '/dev/iso9660/' + label]
                     if os.path.exists(p)]
+            elif util.is_NetBSD():
+                out, _err = util.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
+                devlist = []
+                for dev in out.split():
+                    mscdlabel_out, _ = util.subp(['mscdlabel', dev], rcs=[0, 1])
+                    if ('label "%s"' % label) in mscdlabel_out:
+                        devlist.append('/dev/' + dev)
+                        devlist.append('/dev/' + dev + 'a')  # NetBSD 7
             else:
                 # Query optical drive to get it in blkid cache for 2.6 kernels
                 util.find_devs_with(path="/dev/sr0")
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 70f6bad7..8adb9e75 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -363,6 +363,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
         root = self.tmp_dir()
         self.patchUtils(root)
         self.patchOS(root)
+        self.patchOpen(root)
         return root
 
     @contextmanager
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 11f37000..815da0fd 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -419,12 +419,6 @@ class TestGetLinuxDistro(CiTestCase):
         if path == '/etc/redhat-release':
             return 1
 
-    @classmethod
-    def freebsd_version_exists(self, path):
-        """Side effect function """
-        if path == '/bin/freebsd-version':
-            return 1
-
     @mock.patch('cloudinit.util.load_file')
     def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
         """Verify we get the correct name if the os-release file has
@@ -443,11 +437,16 @@ class TestGetLinuxDistro(CiTestCase):
         dist = util.get_linux_distro()
         self.assertEqual(('ubuntu', '16.04', 'xenial'), dist)
 
-    @mock.patch('cloudinit.util.subp')
-    def test_get_linux_freebsd(self, m_subp, m_path_exists):
+    @mock.patch('platform.system')
+    @mock.patch('platform.release')
+    @mock.patch('cloudinit.util._parse_redhat_release')
+    def test_get_linux_freebsd(self, m_path_exists, m_platform_release,
+                               m_platform_system, m_parse_redhat_release):
         """Verify we get the correct name and release name on FreeBSD."""
-        m_path_exists.side_effect = TestGetLinuxDistro.freebsd_version_exists
-        m_subp.return_value = ("12.0-RELEASE-p10\n", '')
+        m_path_exists.return_value = False
+        m_platform_release.return_value = '12.0-RELEASE-p10'
+        m_platform_system.return_value = 'FreeBSD'
+        m_parse_redhat_release.return_value = {}
         dist = util.get_linux_distro()
         self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist)
 
@@ -538,27 +537,36 @@ class TestGetLinuxDistro(CiTestCase):
         self.assertEqual(
             ('opensuse-tumbleweed', '20180920', platform.machine()), dist)
 
+    @mock.patch('platform.system')
     @mock.patch('platform.dist', create=True)
-    def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
+    def test_get_linux_distro_no_data(self, m_platform_dist,
+                                      m_platform_system, m_path_exists):
         """Verify we get no information if os-release does not exist"""
         m_platform_dist.return_value = ('', '', '')
+        m_platform_system.return_value = "Linux"
         m_path_exists.return_value = 0
         dist = util.get_linux_distro()
         self.assertEqual(('', '', ''), dist)
 
+    @mock.patch('platform.system')
     @mock.patch('platform.dist', create=True)
-    def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
+    def test_get_linux_distro_no_impl(self, m_platform_dist,
+                                      m_platform_system, m_path_exists):
         """Verify we get an empty tuple when no information exists and
         Exceptions are not propagated"""
         m_platform_dist.side_effect = Exception()
+        m_platform_system.return_value = "Linux"
         m_path_exists.return_value = 0
         dist = util.get_linux_distro()
         self.assertEqual(('', '', ''), dist)
 
+    @mock.patch('platform.system')
     @mock.patch('platform.dist', create=True)
-    def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
+    def test_get_linux_distro_plat_data(self, m_platform_dist,
+                                        m_platform_system, m_path_exists):
         """Verify we get the correct platform information"""
         m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+        m_platform_system.return_value = "Linux"
         m_path_exists.return_value = 0
         dist = util.get_linux_distro()
         self.assertEqual(('foo', '1.1', 'aarch64'), dist)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 132f6051..718c6959 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -552,6 +552,11 @@ def is_FreeBSD():
     return system_info()['variant'] == "freebsd"
 
 
+@lru_cache()
+def is_NetBSD():
+    return system_info()['variant'] == "netbsd"
+
+
 def get_cfg_option_bool(yobj, key, default=False):
     if key not in yobj:
         return default
@@ -625,10 +630,9 @@ def get_linux_distro():
                 flavor = match.groupdict()['codename']
         if distro_name == 'rhel':
             distro_name = 'redhat'
-    elif os.path.exists('/bin/freebsd-version'):
-        distro_name = 'freebsd'
-        distro_version, _ = subp(['uname', '-r'])
-        distro_version = distro_version.strip()
+    elif 'BSD' in platform.system():
+        distro_name = platform.system().lower()
+        distro_version = platform.release()
     else:
         dist = ('', '', '')
         try:
@@ -675,7 +679,7 @@ def system_info():
             var = 'suse'
         else:
             var = 'linux'
-    elif system in ('windows', 'darwin', "freebsd"):
+    elif system in ('windows', 'darwin', "freebsd", "netbsd"):
         var = system
 
     info['variant'] = var
@@ -1254,6 +1258,21 @@ def close_stdin():
     os.dup2(fp.fileno(), sys.stdin.fileno())
 
 
+def find_devs_with_netbsd(criteria=None, oformat='device',
+                          tag=None, no_cache=False, path=None):
+    if not path:
+        path = "/dev/cd0"
+    cmd = ["mscdlabel", path]
+    out, _ = subp(cmd, capture=True, decode="replace", rcs=[0, 1])
+    result = out.split()
+    if result and len(result) > 2:
+        if criteria == "TYPE=iso9660" and "ISO" in result:
+            return [path]
+        if criteria == "LABEL=CONFIG-2" and '"config-2"' in result:
+            return [path]
+    return []
+
+
 def find_devs_with(criteria=None, oformat='device',
                    tag=None, no_cache=False, path=None):
     """
@@ -1263,6 +1282,10 @@ def find_devs_with(criteria=None, oformat='device',
         LABEL=