From 6ee01078ae74338e0a11c1d4b13a667c01e9b26f Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov <19779+xnox@users.noreply.github.com> Date: Wed, 25 Nov 2020 17:27:07 +0000 Subject: cc_apt_configure: add riscv64 as a ports arch (#687) --- cloudinit/config/cc_apt_configure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 73d8719f..bb8a1278 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -389,7 +389,7 @@ PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/", PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"} PRIMARY_ARCHES = ['amd64', 'i386'] -PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el'] +PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64'] def get_default_mirrors(arch=None, target=None): -- cgit v1.2.3 From f550c8765ca03d313e54edf35209b877ef3381ff Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Tue, 1 Dec 2020 15:51:47 +0100 Subject: Adding BOOTPROTO = dhcp to render sysconfig dhcp6 stateful on RHEL (#685) BOOTPROTO needs to be set to 'dhcp' on RHEL so NetworkManager can properly acquire ipv6 address. rhbz: #1859695 Signed-off-by: Eduardo Otubo Co-authored-by: Daniel Watkins Co-authored-by: Scott Moser --- cloudinit/net/sysconfig.py | 6 ++++++ tests/unittests/test_net.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index a930e612..ba85584e 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -396,6 +396,12 @@ class Renderer(renderer.Renderer): # Only IPv6 is DHCP, IPv4 may be static iface_cfg['BOOTPROTO'] = 'dhcp6' iface_cfg['DHCLIENT6_MODE'] = 'managed' + # only if rhel AND dhcpv6 stateful + elif (flavor == 'rhel' and + subnet_type == 'ipv6_dhcpv6-stateful'): + iface_cfg['BOOTPROTO'] = 'dhcp' + iface_cfg['DHCPV6C'] = True + iface_cfg['IPV6INIT'] = True else: iface_cfg['IPV6INIT'] = True # Configure network settings using DHCPv6 diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 70453683..8b34e0c9 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1365,7 +1365,7 @@ NETWORK_CONFIGS = { }, 'expected_sysconfig_rhel': { 'ifcfg-iface0': textwrap.dedent("""\ - BOOTPROTO=none + BOOTPROTO=dhcp DEVICE=iface0 DHCPV6C=yes IPV6INIT=yes -- cgit v1.2.3 From 212b291dc0f3108f562bbbadfa89ae355815c01d Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Thu, 10 Dec 2020 19:10:43 +0100 Subject: Drop unnecessary shebang from cmd/main.py (#722) Fedora build system's rpmlint is complaining that there is a file with a shebang but no executable flag set. No need to have shebang on this file, so drop it. Signed-off-by: Eduardo Otubo Co-authored-by: Scott Moser --- cloudinit/cmd/main.py | 1 - 1 file changed, 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index a5446da7..baf1381f 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -1,4 +1,3 @@ -#!/usr/bin/python # # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. 
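[Annotation] Stepping back to the cc_apt_configure change at the top of this series: the two arch lists drive which mirror set get_default_mirrors() hands back. A minimal sketch of that selection, assuming a straightforward membership test and using the constants visible in the diff (the exact upstream function body is not shown in the patch):

def get_default_mirrors_sketch(arch):
    # Assumed logic: primary arches resolve to archive.ubuntu.com, while
    # every entry in PORTS_ARCHES (now including riscv64) resolves to
    # ports.ubuntu.com.
    if arch in PRIMARY_ARCHES:
        return PRIMARY_ARCH_MIRRORS.copy()
    if arch in PORTS_ARCHES:
        return PORTS_MIRRORS.copy()
    raise ValueError("no default mirror known for arch %s" % arch)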
-- cgit v1.2.3 From 913818553a8db236e20751c81dd0e2a27124617c Mon Sep 17 00:00:00 2001 From: Thomas Stringer Date: Wed, 16 Dec 2020 12:35:43 -0800 Subject: Azure: only generate config for NICs with addresses (#709) Prevent network interfaces without IP addresses from being added to the generated network configuration. --- cloudinit/sources/DataSourceAzure.py | 8 +++++- tests/unittests/test_datasource/test_azure.py | 40 +++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 04ff2131..bedf8ea0 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1969,6 +1969,7 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict: netconfig = {'version': 2, 'ethernets': {}} network_metadata = imds_metadata['network'] for idx, intf in enumerate(network_metadata['interface']): + has_ip_address = False # First IPv4 and/or IPv6 address will be obtained via DHCP. # Any additional IPs of each type will be set as static # addresses. @@ -1978,6 +1979,11 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict: 'dhcp6': False} for addr_type in ('ipv4', 'ipv6'): addresses = intf.get(addr_type, {}).get('ipAddress', []) + # If there are no available IP addresses, then we don't + # want to add this interface to the generated config. + if not addresses: + continue + has_ip_address = True if addr_type == 'ipv4': default_prefix = '24' else: @@ -1998,7 +2004,7 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict: dev_config['addresses'].append( '{ip}/{prefix}'.format( ip=privateIp, prefix=netPrefix)) - if dev_config: + if dev_config and has_ip_address: mac = ':'.join(re.findall(r'..', intf['macAddress'])) dev_config.update({ 'match': {'macaddress': mac.lower()}, diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index e363c1f9..d64b538e 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -159,6 +159,22 @@ SECONDARY_INTERFACE = { } } +SECONDARY_INTERFACE_NO_IP = { + "macAddress": "220D3A047598", + "ipv6": { + "ipAddress": [] + }, + "ipv4": { + "subnet": [ + { + "prefix": "24", + "address": "10.0.1.0" + } + ], + "ipAddress": [] + } +} + IMDS_NETWORK_METADATA = { "interface": [ { @@ -1139,6 +1155,30 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual(expected_network_config, dsrc.network_config) + @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', + return_value=None) + def test_network_config_set_from_imds_for_secondary_nic_no_ip( + self, m_driver): + """If an IP address is empty then there should no config for it.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + expected_network_config = { + 'ethernets': { + 'eth0': {'set-name': 'eth0', + 'match': {'macaddress': '00:0d:3a:04:75:98'}, + 'dhcp6': False, + 'dhcp4': True, + 'dhcp4-overrides': {'route-metric': 100}}}, + 'version': 2} + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP) + self.m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(expected_network_config, dsrc.network_config) + def test_availability_zone_set_from_imds(self): """Datasource.availability 
returns IMDS platformFaultDomain.""" sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} -- cgit v1.2.3 From a5484d02973e5710442c11e1dc6b1153695c9a59 Mon Sep 17 00:00:00 2001 From: cawamata <1749824+cawamata@users.noreply.github.com> Date: Fri, 18 Dec 2020 00:59:48 +0900 Subject: cc_ca_certs: add RHEL support (#633) This refactors cc_ca_certs to support non-ca-certificates distros, and adds RHEL support. --- cloudinit/config/cc_ca_certs.py | 123 ++++++--- .../test_handler/test_handler_ca_certs.py | 292 +++++++++++++-------- tools/.github-cla-signers | 1 + 3 files changed, 267 insertions(+), 149 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 3c453d91..bd7bead9 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -25,7 +25,7 @@ can be removed from the system with the configuration option **Module frequency:** per instance -**Supported distros:** alpine, debian, ubuntu +**Supported distros:** alpine, debian, ubuntu, rhel **Config keys**:: @@ -44,60 +44,104 @@ import os from cloudinit import subp from cloudinit import util -CA_CERT_PATH = "/usr/share/ca-certificates/" -CA_CERT_FILENAME = "cloud-init-ca-certs.crt" -CA_CERT_CONFIG = "/etc/ca-certificates.conf" -CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" -CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) +DEFAULT_CONFIG = { + 'ca_cert_path': '/usr/share/ca-certificates/', + 'ca_cert_filename': 'cloud-init-ca-certs.crt', + 'ca_cert_config': '/etc/ca-certificates.conf', + 'ca_cert_system_path': '/etc/ssl/certs/', + 'ca_cert_update_cmd': ['update-ca-certificates'] +} +DISTRO_OVERRIDES = { + 'rhel': { + 'ca_cert_path': '/usr/share/pki/ca-trust-source/', + 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt', + 'ca_cert_config': None, + 'ca_cert_system_path': '/etc/pki/ca-trust/', + 'ca_cert_update_cmd': ['update-ca-trust'] + } +} -distros = ['alpine', 'debian', 'ubuntu'] +distros = ['alpine', 'debian', 'ubuntu', 'rhel'] -def update_ca_certs(): + +def _distro_ca_certs_configs(distro_name): + """Return a distro-specific ca_certs config dictionary + + @param distro_name: String providing the distro class name. + @returns: Dict of distro configurations for ca-cert. + """ + cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG) + cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'], + cfg['ca_cert_filename']) + return cfg + + +def update_ca_certs(distro_cfg): """ Updates the CA certificate cache on the current machine. + + @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - subp.subp(["update-ca-certificates"], capture=False) + subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False) -def add_ca_certs(certs): +def add_ca_certs(distro_cfg, certs): """ Adds certificates to the system. To actually apply the new certificates you must also call L{update_ca_certs}. + @param distro_cfg: A hash providing _distro_ca_certs_configs function. @param certs: A list of certificate strings. """ - if certs: - # First ensure they are strings... - cert_file_contents = "\n".join([str(c) for c in certs]) - util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644) - - if os.stat(CA_CERT_CONFIG).st_size == 0: - # If the CA_CERT_CONFIG file is empty (i.e. all existing - # CA certs have been deleted) then simply output a single - # line with the cloud-init cert filename. - out = "%s\n" % CA_CERT_FILENAME - else: - # Append cert filename to CA_CERT_CONFIG file. 
- # We have to strip the content because blank lines in the file - # causes subsequent entries to be ignored. (LP: #1077020) - orig = util.load_file(CA_CERT_CONFIG) - cur_cont = '\n'.join([line for line in orig.splitlines() - if line != CA_CERT_FILENAME]) - out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME) - util.write_file(CA_CERT_CONFIG, out, omode="wb") - - -def remove_default_ca_certs(distro_name): + if not certs: + return + # First ensure they are strings... + cert_file_contents = "\n".join([str(c) for c in certs]) + util.write_file(distro_cfg['ca_cert_full_path'], + cert_file_contents, + mode=0o644) + update_cert_config(distro_cfg) + + +def update_cert_config(distro_cfg): + """ + Update Certificate config file to add the file path managed cloud-init + + @param distro_cfg: A hash providing _distro_ca_certs_configs function. + """ + if distro_cfg['ca_cert_config'] is None: + return + if os.stat(distro_cfg['ca_cert_config']).st_size == 0: + # If the CA_CERT_CONFIG file is empty (i.e. all existing + # CA certs have been deleted) then simply output a single + # line with the cloud-init cert filename. + out = "%s\n" % distro_cfg['ca_cert_filename'] + else: + # Append cert filename to CA_CERT_CONFIG file. + # We have to strip the content because blank lines in the file + # causes subsequent entries to be ignored. (LP: #1077020) + orig = util.load_file(distro_cfg['ca_cert_config']) + cr_cont = '\n'.join([line for line in orig.splitlines() + if line != distro_cfg['ca_cert_filename']]) + out = "%s\n%s\n" % (cr_cont.rstrip(), + distro_cfg['ca_cert_filename']) + util.write_file(distro_cfg['ca_cert_config'], out, omode="wb") + + +def remove_default_ca_certs(distro_name, distro_cfg): """ Removes all default trusted CA certificates from the system. To actually apply the change you must also call L{update_ca_certs}. + + @param distro_name: String providing the distro class name. + @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - util.delete_dir_contents(CA_CERT_PATH) - util.delete_dir_contents(CA_CERT_SYSTEM_PATH) - util.write_file(CA_CERT_CONFIG, "", mode=0o644) + util.delete_dir_contents(distro_cfg['ca_cert_path']) + util.delete_dir_contents(distro_cfg['ca_cert_system_path']) + util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644) - if distro_name != 'alpine': + if distro_name in ['debian', 'ubuntu']: debconf_sel = ( "ca-certificates ca-certificates/trust_new_crts " + "select no") subp.subp(('debconf-set-selections', '-'), debconf_sel) @@ -120,22 +164,23 @@ def handle(name, cfg, cloud, log, _args): return ca_cert_cfg = cfg['ca-certs'] + distro_cfg = _distro_ca_certs_configs(cloud.distro.name) # If there is a remove-defaults option set to true, remove the system # default trusted CA certs first. if ca_cert_cfg.get("remove-defaults", False): log.debug("Removing default certificates") - remove_default_ca_certs(cloud.distro.name) + remove_default_ca_certs(cloud.distro.name, distro_cfg) # If we are given any new trusted CA certs to add, add them. if "trusted" in ca_cert_cfg: trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted") if trusted_certs: log.debug("Adding %d certificates" % len(trusted_certs)) - add_ca_certs(trusted_certs) + add_ca_certs(distro_cfg, trusted_certs) # Update the system with the new cert configuration. 
log.debug("Updating certificates") - update_ca_certs() + update_ca_certs(distro_cfg) # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index a16430d5..6e3831ed 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -47,12 +47,20 @@ class TestConfig(TestCase): def setUp(self): super(TestConfig, self).setUp() self.name = "ca-certs" - distro = self._fetch_distro('ubuntu') self.paths = None - self.cloud = cloud.Cloud(None, self.paths, None, distro, None) self.log = logging.getLogger("TestNoConfig") self.args = [] + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def _get_cloud(self, kind): + distro = self._fetch_distro(kind) + return cloud.Cloud(None, self.paths, None, distro, None) + + def _mock_init(self): self.mocks = ExitStack() self.addCleanup(self.mocks.close) @@ -64,11 +72,6 @@ class TestConfig(TestCase): self.mock_remove = self.mocks.enter_context( mock.patch.object(cc_ca_certs, 'remove_default_ca_certs')) - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - def test_no_trusted_list(self): """ Test that no certificates are written if the 'trusted' key is not @@ -76,71 +79,95 @@ class TestConfig(TestCase): """ config = {"ca-certs": {}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_empty_trusted_list(self): """Test that no certificate are written if 'trusted' list is empty.""" config = {"ca-certs": {"trusted": []}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_single_trusted(self): """Test that a single cert gets passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1"]}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.mock_add.assert_called_once_with(conf, ['CERT1']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_multiple_trusted(self): """Test that multiple certs get passed to add_ca_certs.""" config = 
{"ca-certs": {"trusted": ["CERT1", "CERT2"]}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(['CERT1', 'CERT2']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.mock_add.assert_called_once_with(conf, ['CERT1', 'CERT2']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_remove_default_ca_certs(self): """Test remove_defaults works as expected.""" config = {"ca-certs": {"remove-defaults": True}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 1) def test_no_remove_defaults_if_false(self): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": False}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) + self.assertEqual(self.mock_add.call_count, 0) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 0) def test_correct_order_for_remove_then_add(self): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} - cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args) + for distro_name in cc_ca_certs.distros: + self._mock_init() + cloud = self._get_cloud(distro_name) + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - self.mock_add.assert_called_once_with(['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) + self.mock_add.assert_called_once_with(conf, ['CERT1']) + self.assertEqual(self.mock_update.call_count, 1) + self.assertEqual(self.mock_remove.call_count, 1) class TestAddCaCerts(TestCase): @@ -154,11 +181,18 @@ class TestAddCaCerts(TestCase): }) self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat") + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + def test_no_certs_in_list(self): """Test that no certificate are written if not provided.""" - with mock.patch.object(util, 'write_file') as mockobj: - cc_ca_certs.add_ca_certs([]) - self.assertEqual(mockobj.call_count, 0) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(util, 'write_file') as mockobj: + cc_ca_certs.add_ca_certs(conf, []) 
+ self.assertEqual(mockobj.call_count, 0) def test_single_cert_trailing_cr(self): """Test adding a single certificate to the trusted CAs @@ -168,20 +202,28 @@ class TestAddCaCerts(TestCase): ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n" expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + self.m_stat.return_value.st_size = 1 - cc_ca_certs.add_ca_certs([cert]) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - cert, mode=0o644), - mock.call("/etc/ca-certificates.conf", expected, omode="wb")]) - mock_load.assert_called_once_with("/etc/ca-certificates.conf") + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) + + cc_ca_certs.add_ca_certs(conf, [cert]) + + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + expected, omode="wb")]) + mock_load.assert_called_once_with(conf['ca_cert_config']) def test_single_cert_no_trailing_cr(self): """Test adding a single certificate to the trusted CAs @@ -190,24 +232,31 @@ class TestAddCaCerts(TestCase): ca_certs_content = "line1\nline2\nline3" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + self.m_stat.return_value.st_size = 1 + + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - cc_ca_certs.add_ca_certs([cert]) + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - cert, mode=0o644), - mock.call("/etc/ca-certificates.conf", - "%s\n%s\n" % (ca_certs_content, - "cloud-init-ca-certs.crt"), - omode="wb")]) + cc_ca_certs.add_ca_certs(conf, [cert]) - mock_load.assert_called_once_with("/etc/ca-certificates.conf") + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + "%s\n%s\n" % (ca_certs_content, + conf['ca_cert_filename']), + omode="wb")]) + + mock_load.assert_called_once_with(conf['ca_cert_config']) def test_single_cert_to_empty_existing_ca_file(self): """Test adding a single certificate to the trusted CAs @@ -216,15 +265,22 @@ class TestAddCaCerts(TestCase): expected = "cloud-init-ca-certs.crt\n" - with mock.patch.object(util, 'write_file', autospec=True) as m_write: - self.m_stat.return_value.st_size = 0 + self.m_stat.return_value.st_size = 0 + + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(util, 'write_file', + autospec=True) as m_write: - cc_ca_certs.add_ca_certs([cert]) 
+ cc_ca_certs.add_ca_certs(conf, [cert]) - m_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - cert, mode=0o644), - mock.call("/etc/ca-certificates.conf", expected, omode="wb")]) + m_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + cert, mode=0o644)]) + if conf['ca_cert_config'] is not None: + m_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + expected, omode="wb")]) def test_multiple_certs(self): """Test adding multiple certificates to the trusted CAs.""" @@ -232,32 +288,41 @@ class TestAddCaCerts(TestCase): expected_cert_file = "\n".join(certs) ca_certs_content = "line1\nline2\nline3" - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) + self.m_stat.return_value.st_size = 1 - cc_ca_certs.add_ca_certs(certs) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - mock_write.assert_has_calls([ - mock.call("/usr/share/ca-certificates/cloud-init-ca-certs.crt", - expected_cert_file, mode=0o644), - mock.call("/etc/ca-certificates.conf", - "%s\n%s\n" % (ca_certs_content, - "cloud-init-ca-certs.crt"), - omode='wb')]) + with ExitStack() as mocks: + mock_write = mocks.enter_context( + mock.patch.object(util, 'write_file')) + mock_load = mocks.enter_context( + mock.patch.object(util, 'load_file', + return_value=ca_certs_content)) - mock_load.assert_called_once_with("/etc/ca-certificates.conf") + cc_ca_certs.add_ca_certs(conf, certs) + + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_full_path'], + expected_cert_file, mode=0o644)]) + if conf['ca_cert_config'] is not None: + mock_write.assert_has_calls([ + mock.call(conf['ca_cert_config'], + "%s\n%s\n" % (ca_certs_content, + conf['ca_cert_filename']), + omode='wb')]) + + mock_load.assert_called_once_with(conf['ca_cert_config']) class TestUpdateCaCerts(unittest.TestCase): def test_commands(self): - with mock.patch.object(subp, 'subp') as mockobj: - cc_ca_certs.update_ca_certs() - mockobj.assert_called_once_with( - ["update-ca-certificates"], capture=False) + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + with mock.patch.object(subp, 'subp') as mockobj: + cc_ca_certs.update_ca_certs(conf) + mockobj.assert_called_once_with( + conf['ca_cert_update_cmd'], capture=False) class TestRemoveDefaultCaCerts(TestCase): @@ -271,24 +336,31 @@ class TestRemoveDefaultCaCerts(TestCase): }) def test_commands(self): - with ExitStack() as mocks: - mock_delete = mocks.enter_context( - mock.patch.object(util, 'delete_dir_contents')) - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_subp = mocks.enter_context(mock.patch.object(subp, 'subp')) - - cc_ca_certs.remove_default_ca_certs('ubuntu') - - mock_delete.assert_has_calls([ - mock.call("/usr/share/ca-certificates/"), - mock.call("/etc/ssl/certs/")]) - - mock_write.assert_called_once_with( - "/etc/ca-certificates.conf", "", mode=0o644) - - mock_subp.assert_called_once_with( - ('debconf-set-selections', '-'), - "ca-certificates ca-certificates/trust_new_crts select no") + for distro_name in cc_ca_certs.distros: + conf = cc_ca_certs._distro_ca_certs_configs(distro_name) + + with ExitStack() as mocks: + mock_delete = mocks.enter_context( + mock.patch.object(util, 'delete_dir_contents')) + mock_write = mocks.enter_context( + mock.patch.object(util, 
'write_file')) + mock_subp = mocks.enter_context( + mock.patch.object(subp, 'subp')) + + cc_ca_certs.remove_default_ca_certs(distro_name, conf) + + mock_delete.assert_has_calls([ + mock.call(conf['ca_cert_path']), + mock.call(conf['ca_cert_system_path'])]) + + if conf['ca_cert_config'] is not None: + mock_write.assert_called_once_with( + conf['ca_cert_config'], "", mode=0o644) + + if distro_name in ['debian', 'ubuntu']: + mock_subp.assert_called_once_with( + ('debconf-set-selections', '-'), + "ca-certificates \ +ca-certificates/trust_new_crts select no") # vi: ts=4 expandtab diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index c278b032..c843e475 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -6,6 +6,7 @@ beezly bipinbachhao BirknerAlex candlerb +cawamata dermotbradley dhensby eandersson -- cgit v1.2.3 From 80847b054e64fb43e0823f86d2bf7e0c295b7fe4 Mon Sep 17 00:00:00 2001 From: Johnson Shi Date: Mon, 4 Jan 2021 12:13:44 -0800 Subject: Azure: Add telemetry for poll IMDS (#741) --- cloudinit/sources/DataSourceAzure.py | 1 + 1 file changed, 1 insertion(+) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index bedf8ea0..8a0bf91d 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -983,6 +983,7 @@ class DataSourceAzure(sources.DataSource): if nl_sock: nl_sock.close() + @azure_ds_telemetry_reporter def _poll_imds(self): """Poll IMDS for the new provisioning data until we get a valid response. Then return the returned JSON object.""" -- cgit v1.2.3 From ea6fcc14909eda6c2409658e897c6ebd5157e290 Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Thu, 7 Jan 2021 16:51:30 +0100 Subject: Missing IPV6_AUTOCONF=no to render sysconfig dhcp6 stateful on RHEL (#753) IPV6_AUTOCONF needs to be set to 'no' on RHEL so NetworkManager can properly acquire ipv6 address. 
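[Annotation] Taken together with commit f550c877 (#685) earlier in this series, the RHEL renderer now emits both settings for a dhcpv6-stateful subnet. For illustration, a Python mapping of the values the unit-test expectations below encode (the renderer's real output is a sysconfig ifcfg file):

iface_cfg_expected = {
    'BOOTPROTO': 'dhcp',     # set by commit f550c877 (#685)
    'DHCPV6C': True,         # rendered as DHCPV6C=yes
    'IPV6INIT': True,        # rendered as IPV6INIT=yes
    'IPV6_AUTOCONF': False,  # added by this commit; rendered as IPV6_AUTOCONF=no
}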
rhbz: #1859695 Signed-off-by: Eduardo Otubo --- cloudinit/net/sysconfig.py | 1 + tests/unittests/test_net.py | 1 + 2 files changed, 2 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index ba85584e..99a4bae4 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -402,6 +402,7 @@ class Renderer(renderer.Renderer): iface_cfg['BOOTPROTO'] = 'dhcp' iface_cfg['DHCPV6C'] = True iface_cfg['IPV6INIT'] = True + iface_cfg['IPV6_AUTOCONF'] = False else: iface_cfg['IPV6INIT'] = True # Configure network settings using DHCPv6 diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 8b34e0c9..bf0cdabb 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1369,6 +1369,7 @@ NETWORK_CONFIGS = { DEVICE=iface0 DHCPV6C=yes IPV6INIT=yes + IPV6_AUTOCONF=no IPV6_FORCE_ACCEPT_RA=yes DEVICE=iface0 NM_CONTROLLED=no -- cgit v1.2.3 From 75eb007f68d3a75c094f0daccd66eb412359e78e Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 7 Jan 2021 11:14:10 -0500 Subject: net_convert: add some missing help text (#755) --- cloudinit/cmd/devel/net_convert.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 80d217ca..0668ffa3 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -28,11 +28,13 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument("-p", "--network-data", type=open, - metavar="PATH", required=True) + metavar="PATH", required=True, + help="The network configuration to read") parser.add_argument("-k", "--kind", choices=['eni', 'network_data.json', 'yaml', 'azure-imds', 'vmware-imc'], - required=True) + required=True, + help="The format of the given network config") parser.add_argument("-d", "--directory", metavar="PATH", help="directory to place output in", @@ -50,7 +52,8 @@ def get_parser(parser=None): help='enable debug logging to stderr.') parser.add_argument("-O", "--output-kind", choices=['eni', 'netplan', 'sysconfig'], - required=True) + required=True, + help="The network config format to emit") return parser -- cgit v1.2.3 From 4f62ae8d01e8caca9039af067280ca2adad6ab6d Mon Sep 17 00:00:00 2001 From: Thomas Stringer Date: Mon, 11 Jan 2021 16:49:24 -0500 Subject: Fix regression with handling of IMDS ssh keys (#760) With the changes for SSH public keys to be retrieved from IMDS as a first option, when a key is passed through not in the raw SSH public key format it causes an issue and the key is not added to the user's authorized_keys file. This PR will temporarily disable this behavior until a permanent fix is put in place. 
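[Annotation] For background on the regression described above, an illustrative contrast between a key in the raw OpenSSH public key format (which the authorized_keys handling expects) and a non-raw value of the kind that triggered the problem. Both strings are hypothetical examples, not data from a real IMDS response:

# Raw OpenSSH public key format is '<key-type> <base64-data> [comment]':
usable_key = "ssh-rsa AAAAB3NzaC1yc2E... user@example"  # hypothetical
# A certificate blob is not a raw public key and cannot be written to
# authorized_keys as-is:
non_raw_key = "-----BEGIN CERTIFICATE-----\n..."  # hypothetical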
--- cloudinit/sources/DataSourceAzure.py | 8 ++++++++ tests/unittests/test_datasource/test_azure.py | 4 +++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 8a0bf91d..090dd66b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -651,6 +651,10 @@ class DataSourceAzure(sources.DataSource): LOG.debug('Retrieving public SSH keys') ssh_keys = [] try: + raise KeyError( + "Not using public SSH keys from IMDS" + ) + # pylint:disable=unreachable ssh_keys = [ public_key['keyData'] for public_key @@ -1272,6 +1276,10 @@ class DataSourceAzure(sources.DataSource): pubkey_info = None try: + raise KeyError( + "Not using public SSH keys from IMDS" + ) + # pylint:disable=unreachable public_keys = self.metadata['imds']['compute']['publicKeys'] LOG.debug( 'Successfully retrieved %s key(s) from IMDS', diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index d64b538e..dc615309 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1797,7 +1797,9 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() dsrc.setup(True) ssh_keys = dsrc.get_public_ssh_keys() - self.assertEqual(ssh_keys, ['key1']) + # Temporarily alter this test so that SSH public keys + # from IMDS are *not* going to be in use to fix a regression. + self.assertEqual(ssh_keys, []) self.assertEqual(m_parse_certificates.call_count, 0) @mock.patch(MOCKPATH + 'get_metadata_from_imds') -- cgit v1.2.3 From 2b2c7f391140cdf268a62ddca0c40bc2e01017ea Mon Sep 17 00:00:00 2001 From: Bao Trinh Date: Tue, 12 Jan 2021 10:47:37 -0600 Subject: archlinux: fix package upgrade command handling (#768) pacman uses `-u` instead of `upgrade` to trigger a system upgrade, fix the command handling so this is properly accounted for. as is, the resulting command attempts to install a (non-existent) `upgrade` package Co-authored-by: Rick Harding --- cloudinit/distros/arch.py | 2 ++ tools/.github-cla-signers | 1 + 2 files changed, 3 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 967be168..378a6daa 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -152,6 +152,8 @@ class Distro(distros.Distro): elif args and isinstance(args, list): cmd.extend(args) + if command == "upgrade": + command = "-u" if command: cmd.append(command) diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index d6ca6d1b..e6e3bdd1 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -26,6 +26,7 @@ nishigori olivierlemasle omBratteng onitake +qubidt riedel slyon smoser -- cgit v1.2.3 From 37abbc43334d522cfbda595fcee2e52592b4d354 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 13 Jan 2021 10:26:32 -0500 Subject: cc_seed_random: update documentation and fix integration test (#771) The documentation did not mention that the given data may not be the exact string written: the cloud's random data may be added to it. Additionally, the documentation of the command key was incorrect. test_seed_random_data was updated to check that the given data is a prefix of the written data, to match cloud-init's expected (and, now, documented) behaviour. 
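[Annotation] An illustrative configuration for this module, expressed here as the parsed Python dict. Key names follow the module documentation updated below; the pollinate invocation is a hypothetical example, not a required command:

cfg = {
    'random_seed': {
        'file': '/dev/urandom',
        # The cloud's own random seed data may be appended to 'data' before
        # writing, which is why the integration test now checks a prefix.
        'data': 'MYUb34023nD:LFDK10913jk;dfnk:Df',
        # Executed after 'file' is populated; the command's environment
        # carries the value of 'file' as RANDOM_SEED_FILE.
        'command': ['pollinate'],
        'command_required': False,
    }
}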
LP: #1911227 --- cloudinit/config/cc_seed_random.py | 12 ++++++++---- tests/integration_tests/modules/test_seed_random_data.py | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 4fb9b44e..911789c7 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -24,15 +24,19 @@ Configuration for this module is under the ``random_seed`` config key. The optionally be specified in encoded form, with the encoding specified in ``encoding``. +If the cloud provides its own random seed data, it will be appended to ``data`` +before it is written to ``file``. + .. note:: when using a multiline value for ``data`` or specifying binary data, be sure to follow yaml syntax and use the ``|`` and ``!binary`` yaml format specifiers when appropriate -Instead of specifying a data string, a command can be run to generate/collect -the data to be written. The command should be specified as a list of args in -the ``command`` key. If a command is specified that cannot be run, no error -will be reported unless ``command_required`` is set to true. +If the ``command`` key is specified, the given command will be executed. This +will happen after ``file`` has been populated. That command's environment will +contain the value of the ``file`` key as ``RANDOM_SEED_FILE``. If a command is +specified that cannot be run, no error will be reported unless +``command_required`` is set to true. For example, to use ``pollinate`` to gather data from a remote entropy server and write it to ``/dev/urandom``, the following could be diff --git a/tests/integration_tests/modules/test_seed_random_data.py b/tests/integration_tests/modules/test_seed_random_data.py index b365fa98..f6a67c19 100644 --- a/tests/integration_tests/modules/test_seed_random_data.py +++ b/tests/integration_tests/modules/test_seed_random_data.py @@ -25,4 +25,4 @@ class TestSeedRandomData: @pytest.mark.user_data(USER_DATA) def test_seed_random_data(self, client): seed_output = client.read_from_file("/root/seed") - assert seed_output.strip() == "MYUb34023nD:LFDK10913jk;dfnk:Df" + assert seed_output.startswith("MYUb34023nD:LFDK10913jk;dfnk:Df") -- cgit v1.2.3 From 9a258eebd96aa5ad4486dba1fe86bea5bcf00c2f Mon Sep 17 00:00:00 2001 From: Pavel Abalikhin Date: Thu, 14 Jan 2021 01:19:17 +0300 Subject: net: Fix static routes to host in eni renderer (#668) Route '-net' parameter is incompatible with /32 IPv4 addresses so we have to use '-host' in that case. --- cloudinit/net/eni.py | 2 ++ tests/integration_tests/bugs/test_gh668.py | 37 ++++++++++++++++++++++++++++++ tests/unittests/test_net.py | 7 ++++++ tools/.github-cla-signers | 1 + 4 files changed, 47 insertions(+) create mode 100644 tests/integration_tests/bugs/test_gh668.py (limited to 'cloudinit') diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 0074691b..a89e5ad2 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -387,6 +387,8 @@ class Renderer(renderer.Renderer): if k == 'network': if ':' in route[k]: route_line += ' -A inet6' + elif route.get('prefix') == 32: + route_line += ' -host' else: route_line += ' -net' if 'prefix' in route: diff --git a/tests/integration_tests/bugs/test_gh668.py b/tests/integration_tests/bugs/test_gh668.py new file mode 100644 index 00000000..a3a0c374 --- /dev/null +++ b/tests/integration_tests/bugs/test_gh668.py @@ -0,0 +1,37 @@ +"""Integration test for gh-668. + +Ensure that static route to host is working correctly. 
+The original problem is specific to the ENI renderer but that test is suitable +for all network configuration outputs. +""" + +import pytest + +from tests.integration_tests.instances import IntegrationInstance + + +DESTINATION_IP = "172.16.0.10" +GATEWAY_IP = "10.0.0.100" + +NETWORK_CONFIG = """\ +version: 2 +ethernets: + eth0: + addresses: [10.0.0.10/8] + dhcp4: false + routes: + - to: {}/32 + via: {} +""".format(DESTINATION_IP, GATEWAY_IP) + +EXPECTED_ROUTE = "{} via {}".format(DESTINATION_IP, GATEWAY_IP) + + +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.lxd_config_dict({ + "user.network-config": NETWORK_CONFIG, +}) +def test_static_route_to_host(client: IntegrationInstance): + route = client.execute("ip route | grep {}".format(DESTINATION_IP)) + assert route.startswith(EXPECTED_ROUTE) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index bf0cdabb..38d934d4 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -4820,6 +4820,9 @@ class TestEniRoundTrip(CiTestCase): {'type': 'route', 'id': 6, 'metric': 1, 'destination': '10.0.200.0/16', 'gateway': '172.23.31.1'}, + {'type': 'route', 'id': 7, + 'metric': 1, 'destination': '10.0.0.100/32', + 'gateway': '172.23.31.1'}, ] files = self._render_and_read( @@ -4843,6 +4846,10 @@ class TestEniRoundTrip(CiTestCase): '172.23.31.1 metric 1 || true'), ('pre-down route del -net 10.0.200.0/16 gw ' '172.23.31.1 metric 1 || true'), + ('post-up route add -host 10.0.0.100/32 gw ' + '172.23.31.1 metric 1 || true'), + ('pre-down route del -host 10.0.0.100/32 gw ' + '172.23.31.1 metric 1 || true'), ] found = files['/etc/network/interfaces'].splitlines() diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index e6e3bdd1..ac95b422 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -32,6 +32,7 @@ slyon smoser sshedi TheRealFalcon +tnt-dev tomponline tsanghan WebSpider -- cgit v1.2.3 From 11630044d235e0c6e1ffd2b12ff8906613ccdac6 Mon Sep 17 00:00:00 2001 From: xiaofengw-vmware <42736879+xiaofengw-vmware@users.noreply.github.com> Date: Thu, 14 Jan 2021 07:18:28 +0800 Subject: [VMware] Support cloudinit raw data feature (#691) This feature will modify VMware datasource to read from meta data and user data which are specified by VMware vSphere user. If meta data/user data are found in cloud-init configuration directory, datasource will parse the meta data/network and user data from the configuration file, otherwise it will continue to parse them from traditional customization configuration file as before. The supported meta data file is in json or yaml format. 
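[Annotation] To make the new VMware flow concrete: a meta data file found in the IMC directory parses to a structure like the one below. The shape mirrors the YAML/JSON fixtures in the unit tests added by this patch; the values are the fixtures', not required ones:

metadata = {
    'instance-id': 'cloud-vm',
    'local-hostname': 'my-host.domain.com',
    # 'network' is optional: when absent, the datasource falls back to
    # self.distro.generate_fallback_config().
    'network': {
        'version': 2,
        'ethernets': {
            'nics': {'match': {'name': 'ens*'}, 'dhcp4': True},
        },
    },
}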
--- cloudinit/sources/DataSourceOVF.py | 162 +++++++++++- cloudinit/sources/helpers/vmware/imc/config.py | 12 + .../sources/helpers/vmware/imc/guestcust_error.py | 1 + tests/unittests/test_datasource/test_ovf.py | 291 ++++++++++++++++++++- tests/unittests/test_vmware_config_file.py | 16 ++ 5 files changed, 468 insertions(+), 14 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 741c140a..94d9f1b9 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -16,6 +16,7 @@ from xml.dom import minidom from cloudinit import dmi from cloudinit import log as logging +from cloudinit import safeyaml from cloudinit import sources from cloudinit import subp from cloudinit import util @@ -47,6 +48,7 @@ LOG = logging.getLogger(__name__) CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg" GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts" +VMWARE_IMC_DIR = "/var/run/vmware-imc" class DataSourceOVF(sources.DataSource): @@ -99,9 +101,7 @@ class DataSourceOVF(sources.DataSource): if not self.vmware_customization_supported: LOG.debug("Skipping the check for " "VMware Customization support") - elif not util.get_cfg_option_bool( - self.sys_cfg, "disable_vmware_customization", True): - + else: search_paths = ( "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") @@ -119,7 +119,9 @@ class DataSourceOVF(sources.DataSource): # When the VM is powered on, the "VMware Tools" daemon # copies the customization specification file to # /var/run/vmware-imc directory. cloud-init code needs - # to search for the file in that directory. + # to search for the file in that directory which indicates + # that required metadata and userdata files are now + # present. 
max_wait = get_max_wait_from_cfg(self.ds_cfg) vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, @@ -129,26 +131,83 @@ class DataSourceOVF(sources.DataSource): else: LOG.debug("Did not find the customization plugin.") + md_path = None if vmwareImcConfigFilePath: + imcdirpath = os.path.dirname(vmwareImcConfigFilePath) + cf = ConfigFile(vmwareImcConfigFilePath) + self._vmware_cust_conf = Config(cf) LOG.debug("Found VMware Customization Config File at %s", vmwareImcConfigFilePath) - nicspath = wait_for_imc_cfg_file( - filename="nics.txt", maxwait=10, naplen=5) + try: + (md_path, ud_path, nicspath) = collect_imc_file_paths( + self._vmware_cust_conf) + except FileNotFoundError as e: + _raise_error_status( + "File(s) missing in directory", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath, + self._vmware_cust_conf) else: LOG.debug("Did not find VMware Customization Config File") - else: - LOG.debug("Customization for VMware platform is disabled.") - if vmwareImcConfigFilePath: + # Honor disable_vmware_customization setting on metadata absent + if not md_path: + if util.get_cfg_option_bool(self.sys_cfg, + "disable_vmware_customization", + True): + LOG.debug( + "Customization for VMware platform is disabled.") + # reset vmwareImcConfigFilePath to None to avoid + # customization for VMware platform + vmwareImcConfigFilePath = None + + use_raw_data = bool(vmwareImcConfigFilePath and md_path) + if use_raw_data: + set_gc_status(self._vmware_cust_conf, "Started") + LOG.debug("Start to load cloud-init meta data and user data") + try: + (md, ud, cfg, network) = load_cloudinit_data(md_path, ud_path) + + if network: + self._network_config = network + else: + self._network_config = ( + self.distro.generate_fallback_config() + ) + + except safeyaml.YAMLError as e: + _raise_error_status( + "Error parsing the cloud-init meta data", + e, + GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT, + vmwareImcConfigFilePath, + self._vmware_cust_conf) + except Exception as e: + _raise_error_status( + "Error loading cloud-init configuration", + e, + GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, + vmwareImcConfigFilePath, + self._vmware_cust_conf) + + self._vmware_cust_found = True + found.append('vmware-tools') + + util.del_dir(imcdirpath) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_DONE, + GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) + set_gc_status(self._vmware_cust_conf, "Successful") + + elif vmwareImcConfigFilePath: + # Load configuration from vmware_imc self._vmware_nics_to_enable = "" try: - cf = ConfigFile(vmwareImcConfigFilePath) - self._vmware_cust_conf = Config(cf) set_gc_status(self._vmware_cust_conf, "Started") (md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf) self._vmware_nics_to_enable = get_nics_to_enable(nicspath) - imcdirpath = os.path.dirname(vmwareImcConfigFilePath) product_marker = self._vmware_cust_conf.marker_id hasmarkerfile = check_marker_exists( product_marker, os.path.join(self.paths.cloud_dir, 'data')) @@ -684,4 +743,83 @@ def _raise_error_status(prefix, error, event, config_file, conf): util.del_dir(os.path.dirname(config_file)) raise error + +def load_cloudinit_data(md_path, ud_path): + """ + Load the cloud-init meta data, user data, cfg and network from the + given files + + @return: 4-tuple of configuration + metadata, userdata, cfg={}, network + + @raises: FileNotFoundError if md_path or ud_path are absent + """ + LOG.debug('load meta data from: %s: user data from: %s', + md_path, ud_path) + md = {} + ud = None + 
network = None + + md = safeload_yaml_or_dict(util.load_file(md_path)) + + if 'network' in md: + network = md['network'] + + if ud_path: + ud = util.load_file(ud_path).replace("\r", "") + return md, ud, {}, network + + +def safeload_yaml_or_dict(data): + ''' + The meta data could be JSON or YAML. Since YAML is a strict superset of + JSON, we will unmarshal the data as YAML. If data is None then a new + dictionary is returned. + ''' + if not data: + return {} + return safeyaml.load(data) + + +def collect_imc_file_paths(cust_conf): + ''' + collect all the other imc files. + + metadata is preferred to nics.txt configuration data. + + If metadata file exists because it is specified in customization + configuration, then metadata is required and userdata is optional. + + @return a 3-tuple containing desired configuration file paths if present + Expected returns: + 1. user provided metadata and userdata (md_path, ud_path, None) + 2. user provided metadata (md_path, None, None) + 3. user-provided network config (None, None, nics_path) + 4. No config found (None, None, None) + ''' + md_path = None + ud_path = None + nics_path = None + md_file = cust_conf.meta_data_name + if md_file: + md_path = os.path.join(VMWARE_IMC_DIR, md_file) + if not os.path.exists(md_path): + raise FileNotFoundError("meta data file is not found: %s" + % md_path) + + ud_file = cust_conf.user_data_name + if ud_file: + ud_path = os.path.join(VMWARE_IMC_DIR, ud_file) + if not os.path.exists(ud_path): + raise FileNotFoundError("user data file is not found: %s" + % ud_path) + else: + nics_path = os.path.join(VMWARE_IMC_DIR, "nics.txt") + if not os.path.exists(nics_path): + LOG.debug('%s does not exist.', nics_path) + nics_path = None + + return md_path, ud_path, nics_path + + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py index 7109aef3..bdfab5a0 100644 --- a/cloudinit/sources/helpers/vmware/imc/config.py +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -27,6 +27,8 @@ class Config(object): UTC = 'DATETIME|UTC' POST_GC_STATUS = 'MISC|POST-GC-STATUS' DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT' + CLOUDINIT_META_DATA = 'CLOUDINIT|METADATA' + CLOUDINIT_USER_DATA = 'CLOUDINIT|USERDATA' def __init__(self, configFile): self._configFile = configFile @@ -130,4 +132,14 @@ class Config(object): raise ValueError('defaultRunPostScript value should be yes/no') return defaultRunPostScript == 'yes' + @property + def meta_data_name(self): + """Return the name of cloud-init meta data.""" + return self._configFile.get(Config.CLOUDINIT_META_DATA, None) + + @property + def user_data_name(self): + """Return the name of cloud-init user data.""" + return self._configFile.get(Config.CLOUDINIT_USER_DATA, None) + # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py index 65ae7390..96d839b8 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py @@ -11,5 +11,6 @@ class GuestCustErrorEnum(object): GUESTCUST_ERROR_SUCCESS = 0 GUESTCUST_ERROR_SCRIPT_DISABLED = 6 + GUESTCUST_ERROR_WRONG_META_FORMAT = 9 # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 16773de5..dce01f5d 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -17,6 +17,7 @@ from 
cloudinit.helpers import Paths from cloudinit.sources import DataSourceOVF as dsovf from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( CustomScriptNotFound) +from cloudinit.safeyaml import YAMLError MPATH = 'cloudinit.sources.DataSourceOVF.' @@ -138,16 +139,29 @@ class TestDatasourceOVF(CiTestCase): 'DEBUG: No system-product-name found', self.logs.getvalue()) def test_get_data_no_vmware_customization_disabled(self): - """When vmware customization is disabled via sys_cfg log a message.""" + """When cloud-init workflow for vmware is disabled via sys_cfg and + no meta data provided, log a message. + """ paths = Paths({'cloud_dir': self.tdir}) ds = self.datasource( sys_cfg={'disable_vmware_customization': True}, distro={}, paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) retcode = wrap_and_call( 'cloudinit.sources.DataSourceOVF', {'dmi.read_dmi_data': 'vmware', 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND}, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file}, ds.get_data) self.assertFalse(retcode, 'Expected False return from ds.get_data') self.assertIn( @@ -344,6 +358,279 @@ class TestDatasourceOVF(CiTestCase): 'vmware (%s/seed/ovf-env.xml)' % self.tdir, ds.subplatform) + def test_get_data_cloudinit_metadata_json(self): + """Test metadata can be loaded to cloud-init metadata and network. + The metadata format is json. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + { + "instance-id": "cloud-vm", + "local-hostname": "my-host.domain.com", + "network": { + "version": 2, + "ethernets": { + "eths": { + "match": { + "name": "ens*" + }, + "dhcp4": true + } + } + } + } + """) + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) + self.assertEqual(2, ds.network_config['version']) + self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4']) + + def test_get_data_cloudinit_metadata_yaml(self): + """Test metadata can be loaded to cloud-init metadata and network. + The metadata format is yaml. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) + self.assertEqual(2, ds.network_config['version']) + self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4']) + + def test_get_data_cloudinit_metadata_not_valid(self): + """Test metadata is not JSON or YAML format. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = "[This is not json or yaml format]a=b" + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(YAMLError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [ + self.tdir + '/test-meta', '', '' + ], + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn("expected '', but found ''", + str(context.exception)) + + def test_get_data_cloudinit_metadata_not_found(self): + """Test metadata file can't be found. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Don't prepare the meta data file + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(FileNotFoundError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn('is not found', str(context.exception)) + + def test_get_data_cloudinit_userdata(self): + """Test user data can be loaded to cloud-init user data. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + # Prepare the user data file + userdata_file = self.tmp_path('test-user', self.tdir) + userdata_content = "This is the user data" + util.write_file(userdata_file, userdata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', + self.tdir + '/test-user', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual(userdata_content, ds.userdata_raw) + + def test_get_data_cloudinit_userdata_not_found(self): + """Test userdata file can't be found. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + # Don't prepare the user data file + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(FileNotFoundError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn('is not found', str(context.exception)) + class TestTransportIso9660(CiTestCase): diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index 9c7d25fa..430cc69f 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -525,5 +525,21 @@ class TestVmwareNetConfig(CiTestCase): 'gateway': '10.20.87.253'}]}], nc.generate()) + def test_meta_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.meta_data_name) + cf._insertKey("CLOUDINIT|METADATA", "test-metadata") + conf = Config(cf) + self.assertEqual("test-metadata", conf.meta_data_name) + + def test_user_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.user_data_name) + cf._insertKey("CLOUDINIT|USERDATA", "test-userdata") + conf = Config(cf) + 
self.assertEqual("test-userdata", conf.user_data_name) + # vi: ts=4 expandtab -- cgit v1.2.3 From 6fc9da9930485f4aa42aca6f2f7aa6f520223ec2 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 14 Jan 2021 15:20:55 -0500 Subject: Revert "ssh_util: handle non-default AuthorizedKeysFile config (#586)" This reverts commit b0e73814db4027dba0b7dc0282e295b7f653325c. --- cloudinit/ssh_util.py | 6 +++--- tests/unittests/test_sshutil.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index d5113996..c08042d6 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -262,13 +262,13 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): except (IOError, OSError): # Give up and use a default key filename - auth_key_fns.append(default_authorizedkeys_file) + auth_key_fns[0] = default_authorizedkeys_file util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in SSH " "config from %r, using 'AuthorizedKeysFile' file " "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) - # always store all the keys in the first file configured on sshd_config - return (auth_key_fns[0], parse_authorized_keys(auth_key_fns)) + # always store all the keys in the user's private file + return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) def setup_user_keys(keys, username, options=None): diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 88a111e3..fd1d1bac 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -593,7 +593,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): fpw.pw_name, sshd_config) content = ssh_util.update_authorized_keys(auth_key_entries, []) - self.assertEqual(authorized_keys, auth_key_fn) + self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) self.assertTrue(VALID_CONTENT['rsa'] in content) self.assertTrue(VALID_CONTENT['dsa'] in content) @@ -610,7 +610,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): sshd_config = self.tmp_path('sshd_config') util.write_file( sshd_config, - "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys) + "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) ) (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( @@ -618,7 +618,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): ) content = ssh_util.update_authorized_keys(auth_key_entries, []) - self.assertEqual(user_keys, auth_key_fn) + self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) self.assertTrue(VALID_CONTENT['rsa'] in content) self.assertTrue(VALID_CONTENT['dsa'] in content) -- cgit v1.2.3 From 9db8620beda698a67548defe76f7b75da35db4b9 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 15 Jan 2021 12:32:57 -0500 Subject: Release 20.4.1 Bump the version in cloudinit/version.py to 20.4.1 and update ChangeLog. 
LP: #1911680 --- ChangeLog | 3 +++ cloudinit/version.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index 33b2bf74..9b41924a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +20.4.1 + - Revert "ssh_util: handle non-default AuthorizedKeysFile config (#586)" + 20.4 - tox: avoid tox testenv subsvars for xenial support (#684) - Ensure proper root permissions in integration tests (#664) [James Falcon] diff --git a/cloudinit/version.py b/cloudinit/version.py index f25e9145..36ec728e 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "20.4" +__VERSION__ = "20.4.1" _PACKAGED_VERSION = '@@PACKAGED_VERSION@@' FEATURES = [ -- cgit v1.2.3 From cdc5b81f33aee0ed3ef1ae239e5cec1906d0178a Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 19 Jan 2021 12:23:23 -0500 Subject: Revert "ssh_util: handle non-default AuthorizedKeysFile config (#586)" (#775) This reverts commit b0e73814db4027dba0b7dc0282e295b7f653325c. --- cloudinit/ssh_util.py | 6 +++--- tests/unittests/test_sshutil.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index d5113996..c08042d6 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -262,13 +262,13 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): except (IOError, OSError): # Give up and use a default key filename - auth_key_fns.append(default_authorizedkeys_file) + auth_key_fns[0] = default_authorizedkeys_file util.logexc(LOG, "Failed extracting 'AuthorizedKeysFile' in SSH " "config from %r, using 'AuthorizedKeysFile' file " "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) - # always store all the keys in the first file configured on sshd_config - return (auth_key_fns[0], parse_authorized_keys(auth_key_fns)) + # always store all the keys in the user's private file + return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) def setup_user_keys(keys, username, options=None): diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 88a111e3..fd1d1bac 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -593,7 +593,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): fpw.pw_name, sshd_config) content = ssh_util.update_authorized_keys(auth_key_entries, []) - self.assertEqual(authorized_keys, auth_key_fn) + self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) self.assertTrue(VALID_CONTENT['rsa'] in content) self.assertTrue(VALID_CONTENT['dsa'] in content) @@ -610,7 +610,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): sshd_config = self.tmp_path('sshd_config') util.write_file( sshd_config, - "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys) + "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) ) (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( @@ -618,7 +618,7 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): ) content = ssh_util.update_authorized_keys(auth_key_entries, []) - self.assertEqual(user_keys, auth_key_fn) + self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) self.assertTrue(VALID_CONTENT['rsa'] in content) self.assertTrue(VALID_CONTENT['dsa'] in content) -- cgit v1.2.3 From b3abcdc09b894249c8360a030d8aa3b815bd0c20 Mon Sep 17 00:00:00 2001 From: Dan Kenigsberg 
Date: Wed, 20 Jan 2021 21:50:10 +0200 Subject: Use proper spelling for Red Hat (#778) The company name has two distinct words. Signed-off-by: Dan Kenigsberg --- ChangeLog | 2 +- cloudinit/config/cc_resolv_conf.py | 4 ++-- cloudinit/config/cc_rh_subscription.py | 8 ++++---- doc/rtd/topics/examples.rst | 4 ++-- tests/cloud_tests/testcases/examples/TODO.md | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index 33b2bf74..fb87a765 100644 --- a/ChangeLog +++ b/ChangeLog @@ -528,7 +528,7 @@ - docs: add additional details to per-instance/once [Joshua Powers] - Update doc-requirements.txt [Joshua Powers] - doc-requirements: add missing dep [Joshua Powers] - - dhcp: Support RedHat dhcp rfc3442 lease format for option 121 (#76) + - dhcp: Support Red Hat dhcp rfc3442 lease format for option 121 (#76) [Eric Lafontaine] (LP: #1850642) - network_state: handle empty v1 config (#45) (LP: #1852496) - docs: Add document on how to report bugs [Joshua Powers] diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 7beb11ca..466dad03 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -14,12 +14,12 @@ Resolv Conf This module is intended to manage resolv.conf in environments where early configuration of resolv.conf is necessary for further bootstrapping and/or where configuration management such as puppet or chef own dns configuration. -As Debian/Ubuntu will, by default, utilize resolvconf, and similarly RedHat +As Debian/Ubuntu will, by default, utilize resolvconf, and similarly Red Hat will use sysconfig, this module is likely to be of little use unless those are configured correctly. .. note:: - For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP + For Red Hat with sysconfig, be sure to set PEERDNS=no for all DHCP enabled NICs. .. note:: diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 28d62e9d..693317c2 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -5,15 +5,15 @@ # This file is part of cloud-init. See LICENSE file for license information. """ -RedHat Subscription -------------------- +Red Hat Subscription +-------------------- **Summary:** register red hat enterprise linux based system -Register a RedHat system either by username and password *or* activation and +Register a Red Hat system either by username and password *or* activation and org. Following a sucessful registration, you can auto-attach subscriptions, set the service level, add subscriptions based on pool id, enable/disable yum repositories based on repo id, and alter the rhsm_baseurl and server-hostname -in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register RedHat +in ``/etc/rhsm/rhs.conf``. For more details, see the ``Register Red Hat Subscription`` example config. **Internal name:** ``cc_rh_subscription`` diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst index 81860f85..97fd616d 100644 --- a/doc/rtd/topics/examples.rst +++ b/doc/rtd/topics/examples.rst @@ -149,8 +149,8 @@ Disk setup :language: yaml :linenos: -Register RedHat Subscription -============================ +Register Red Hat Subscription +============================= .. 
literalinclude:: ../../examples/cloud-config-rh_subscription.txt
 :language: yaml
diff --git a/tests/cloud_tests/testcases/examples/TODO.md b/tests/cloud_tests/testcases/examples/TODO.md
index 8db0e98e..cde699a7 100644
--- a/tests/cloud_tests/testcases/examples/TODO.md
+++ b/tests/cloud_tests/testcases/examples/TODO.md
@@ -6,7 +6,7 @@ Below lists each of the issing examples and why it is not currently added.
 - Puppet (takes > 60 seconds to run)
 - Manage resolve.conf (lxd backend overrides changes)
 - Adding a yum repository (need centos system)
- - Register RedHat Subscription (need centos system + subscription)
+ - Register Red Hat Subscription (need centos system + subscription)
 - Adjust mount points mounted (need multiple disks)
 - Call a url when finished (need end point)
 - Reboot/poweroff when finished (how to test)
-- cgit v1.2.3
From 6efe16d8f28b89423f7e6e60fc0d4ab1f385f4d8 Mon Sep 17 00:00:00 2001
From: xiaofengw-vmware <42736879+xiaofengw-vmware@users.noreply.github.com>
Date: Thu, 28 Jan 2021 04:38:47 +0800
Subject: [VMware] change default max wait time to 15s (#774)
If cloud-init is enabled on the VMware platform, cloud-init waits until its
configuration file is ready; the maximum wait currently defaults to 90
seconds. In our testing this configuration file is ready within 1 second,
so change the default to 15 seconds for better performance. Also update
the documentation on how to change the default value in the cloud-init
configuration file.
---
 cloudinit/sources/DataSourceOVF.py | 2 +-
 doc/rtd/topics/datasources/ovf.rst | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)
(limited to 'cloudinit')
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 94d9f1b9..bbeada0b 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -416,7 +416,7 @@ class DataSourceOVFNet(DataSourceOVF):
 def get_max_wait_from_cfg(cfg):
- default_max_wait = 90
+ default_max_wait = 15
 max_wait_cfg_option = 'vmware_cust_file_max_wait'
 max_wait = default_max_wait
diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst
index 6256e624..85b0c377 100644
--- a/doc/rtd/topics/datasources/ovf.rst
+++ b/doc/rtd/topics/datasources/ovf.rst
@@ -13,6 +13,15 @@ source code tree in doc/sources/ovf
 Configuration
 -------------
+The following configuration can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`).
+
+The settings that may be configured are:
+
+ * vmware_cust_file_max_wait: the maximum amount of clock time in seconds that
+   should be spent waiting for vmware customization files. (default: 15)
+
+
 On VMware platforms, VMTools use is required for OVF datasource configuration
 settings as well as vCloud and vSphere admin configuration. User could change
 the VMTools configuration options with command::
-- cgit v1.2.3
From 36ddf1ebed3f264fa86ef4f657dce29244c2e068 Mon Sep 17 00:00:00 2001
From: Jordi Massaguer Pla
Date: Fri, 29 Jan 2021 15:43:56 +0100
Subject: includedir in sudoers can be prefixed by "arroba" (@) (#783)
Since version 1.9.1, @includedir can be used in sudoers files instead of
#includedir: https://github.com/sudo-project/sudo/releases/tag/SUDO_1_9_1
"@includedir" is actually the modern syntax and "#includedir" the historic
one; "#includedir" was considered too puzzling because it starts with a
"#", which otherwise denotes a comment.
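For illustration, a minimal, hypothetical sketch (not part of the patch)
of how the updated pattern in the diff below treats both prefixes:

    import re

    # Pattern from the diff below: accepts the historic '#includedir'
    # and the modern '@includedir' (sudo >= 1.9.1). As a character
    # class, [#|@] would also match a literal '|', which is harmless
    # here because no sudoers directive starts with '|'.
    INCLUDEDIR_RE = re.compile(r"^[#|@]includedir\s+(.*)$")

    for line in ("#includedir /etc/sudoers.d",
                 "@includedir /etc/sudoers.d",
                 "# a plain comment"):
        match = INCLUDEDIR_RE.search(line.strip())
        print(line, "->", match.group(1) if match else "no match")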
This happens to be the default in the SUSE Linux Enterprise sudoers
package, so cloud-init should take it into account. Otherwise, cloud-init
added an extra #includedir line, which resulted in the files under
/etc/sudoers.d being included twice: once by the @includedir from the SUSE
package and once by the #includedir from cloud-init. As a consequence, a
Cmnd_Alias defined inside any of those files was defined twice, producing
an error when using sudo.
---
 cloudinit/distros/__init__.py | 2 +-
 tests/unittests/test_distros/test_generic.py | 13 +++++++++++++
 2 files changed, 14 insertions(+), 1 deletion(-)
(limited to 'cloudinit')
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 1e118472..220bd11f 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -673,7 +673,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
 found_include = False
 for line in sudoers_contents.splitlines():
 line = line.strip()
- include_match = re.search(r"^#includedir\s+(.*)$", line)
+ include_match = re.search(r"^[#|@]includedir\s+(.*)$", line)
 if not include_match:
 continue
 included_dir = include_match.group(1).strip()
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index 44607489..336150bc 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -119,6 +119,19 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
 self.assertIn("josh", contents)
 self.assertEqual(2, contents.count("josh"))
+ def test_sudoers_ensure_only_one_includedir(self):
+ cls = distros.fetch("ubuntu")
+ d = cls("ubuntu", {}, None)
+ self.patchOS(self.tmp)
+ self.patchUtils(self.tmp)
+ for char in ['#', '@']:
+ util.write_file("/etc/sudoers", "{}includedir /b".format(char))
+ d.ensure_sudo_dir("/b")
+ contents = util.load_file("/etc/sudoers")
+ self.assertIn("includedir /b", contents)
+ self.assertTrue(os.path.isdir("/b"))
+ self.assertEqual(1, contents.count("includedir /b"))
+ def test_arch_package_mirror_info_unknown(self):
 """for an unknown arch, we should get back that with arch 'default'."""
 arch_mirrors = gapmi(package_mirrors, arch="unknown")
-- cgit v1.2.3
From 3cebe0df1e002bd85c8aa78e89f0ca507c17195a Mon Sep 17 00:00:00 2001
From: Andrew Bogott
Date: Fri, 5 Feb 2021 10:11:14 -0600
Subject: openstack: read the dynamic metadata group vendor_data2.json (#777)
Add support for OpenStack's dynamic vendor data, which appears under
openstack/latest/vendor_data2.json. This adds vendor_data2 to all
pathways; it should be a no-op for non-OpenStack providers.
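As a simplified, hypothetical sketch of the precedence this change
establishes when the cloud-config sources are merged (cloud-init's real
merging logic is richer than a plain dict update): static vendor data
applies first, dynamic vendor data (vendor_data2) second, and user data
last, so later sources win on conflicting keys. The values mirror the
keys exercised by the test_data.py changes below:

    vendor_data = {"baz": "quxA", "bar": "quxB", "foo": "quxC"}
    vendor_data2 = {"baz": "quxZ", "corge": "quxD"}
    user_data = {"baz": "qux", "bar": "qux2"}

    # Later updates override earlier ones: user > vendor2 > vendor.
    merged = {**vendor_data, **vendor_data2, **user_data}
    print(merged)
    # {'baz': 'qux', 'bar': 'qux2', 'foo': 'quxC', 'corge': 'quxD'}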
LP: #1841104 --- cloudinit/cmd/tests/test_main.py | 3 +- cloudinit/helpers.py | 7 ++ cloudinit/settings.py | 1 + cloudinit/sources/DataSourceOpenStack.py | 8 ++ cloudinit/sources/__init__.py | 13 ++- cloudinit/sources/helpers/openstack.py | 5 + cloudinit/stages.py | 106 ++++++++++++++-------- doc/rtd/topics/datasources/openstack.rst | 8 ++ tests/unittests/test_data.py | 37 ++++++-- tests/unittests/test_datasource/test_openstack.py | 32 ++++++- 10 files changed, 171 insertions(+), 49 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index 585b3b0e..78b27441 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -127,7 +127,8 @@ class TestMain(FilesystemMockingTestCase): 'syslog_fix_perms': [ 'syslog:adm', 'root:adm', 'root:wheel', 'root:root' ], - 'vendor_data': {'enabled': True, 'prefix': []}}) + 'vendor_data': {'enabled': True, 'prefix': []}, + 'vendor_data2': {'enabled': True, 'prefix': []}}) updated_cfg.pop('system_info') self.assertEqual(updated_cfg, cfg) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 9752ad28..fc5011ec 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -230,6 +230,10 @@ class ConfigMerger(object): cc_paths = ['cloud_config'] if self._include_vendor: + # the order is important here: we want vendor2 + # (dynamic vendor data from OpenStack) + # to override vendor (static data from OpenStack) + cc_paths.append('vendor2_cloud_config') cc_paths.append('vendor_cloud_config') for cc_p in cc_paths: @@ -337,9 +341,12 @@ class Paths(object): "obj_pkl": "obj.pkl", "cloud_config": "cloud-config.txt", "vendor_cloud_config": "vendor-cloud-config.txt", + "vendor2_cloud_config": "vendor2-cloud-config.txt", "data": "data", "vendordata_raw": "vendor-data.txt", + "vendordata2_raw": "vendor-data2.txt", "vendordata": "vendor-data.txt.i", + "vendordata2": "vendor-data2.txt.i", "instance_id": ".instance-id", "manual_clean_marker": "manual-clean", "warnings": "warnings", diff --git a/cloudinit/settings.py b/cloudinit/settings.py index ca4ffa8e..7516e17b 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -56,6 +56,7 @@ CFG_BUILTIN = { 'network': {'renderers': None}, }, 'vendor_data': {'enabled': True, 'prefix': []}, + 'vendor_data2': {'enabled': True, 'prefix': []}, } # Valid frequencies of handlers/modules diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index b3406c67..619a171e 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -167,6 +167,14 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): LOG.warning("Invalid content in vendor-data: %s", e) self.vendordata_raw = None + vd2 = results.get('vendordata2') + self.vendordata2_pure = vd2 + try: + self.vendordata2_raw = sources.convert_vendordata(vd2) + except ValueError as e: + LOG.warning("Invalid content in vendor-data2: %s", e) + self.vendordata2_raw = None + return True def _crawl_metadata(self): diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 9dccc687..1ad1880d 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -187,7 +187,8 @@ class DataSource(metaclass=abc.ABCMeta): cached_attr_defaults = ( ('ec2_metadata', UNSET), ('network_json', UNSET), ('metadata', {}), ('userdata', None), ('userdata_raw', None), - ('vendordata', None), ('vendordata_raw', None)) + ('vendordata', None), ('vendordata_raw', None), + 
('vendordata2', None), ('vendordata2_raw', None)) _dirty_cache = False @@ -203,7 +204,9 @@ class DataSource(metaclass=abc.ABCMeta): self.metadata = {} self.userdata_raw = None self.vendordata = None + self.vendordata2 = None self.vendordata_raw = None + self.vendordata2_raw = None self.ds_cfg = util.get_cfg_by_path( self.sys_cfg, ("datasource", self.dsname), {}) @@ -392,6 +395,11 @@ class DataSource(metaclass=abc.ABCMeta): self.vendordata = self.ud_proc.process(self.get_vendordata_raw()) return self.vendordata + def get_vendordata2(self): + if self.vendordata2 is None: + self.vendordata2 = self.ud_proc.process(self.get_vendordata2_raw()) + return self.vendordata2 + @property def fallback_interface(self): """Determine the network interface used during local network config.""" @@ -494,6 +502,9 @@ class DataSource(metaclass=abc.ABCMeta): def get_vendordata_raw(self): return self.vendordata_raw + def get_vendordata2_raw(self): + return self.vendordata2_raw + # the data sources' config_obj is a cloud-config formated # object that came to it from ways other than cloud-config # because cloud-config content would be handled elsewhere diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 3e6365f1..4f566e64 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -247,6 +247,11 @@ class BaseReader(metaclass=abc.ABCMeta): False, load_json_anytype, ) + files['vendordata2'] = ( + self._path_join("openstack", version, 'vendor_data2.json'), + False, + load_json_anytype, + ) files['networkdata'] = ( self._path_join("openstack", version, 'network_data.json'), False, diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 0cce6e80..3ef4491c 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -360,8 +360,18 @@ class Init(object): reporter=self.reporter) def update(self): - self._store_userdata() - self._store_vendordata() + self._store_rawdata(self.datasource.get_userdata_raw(), + 'userdata') + self._store_processeddata(self.datasource.get_userdata(), + 'userdata') + self._store_rawdata(self.datasource.get_vendordata_raw(), + 'vendordata') + self._store_processeddata(self.datasource.get_vendordata(), + 'vendordata') + self._store_rawdata(self.datasource.get_vendordata2_raw(), + 'vendordata2') + self._store_processeddata(self.datasource.get_vendordata2(), + 'vendordata2') def setup_datasource(self): with events.ReportEventStack("setup-datasource", @@ -381,28 +391,18 @@ class Init(object): is_new_instance=self.is_new_instance()) self._write_to_cache() - def _store_userdata(self): - raw_ud = self.datasource.get_userdata_raw() - if raw_ud is None: - raw_ud = b'' - util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600) - # processed userdata is a Mime message, so write it as string. - processed_ud = self.datasource.get_userdata() - if processed_ud is None: - raw_ud = '' - util.write_file(self._get_ipath('userdata'), str(processed_ud), 0o600) - - def _store_vendordata(self): - raw_vd = self.datasource.get_vendordata_raw() - if raw_vd is None: - raw_vd = b'' - util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600) - # processed vendor data is a Mime message, so write it as string. 
- processed_vd = str(self.datasource.get_vendordata()) - if processed_vd is None: - processed_vd = '' - util.write_file(self._get_ipath('vendordata'), str(processed_vd), - 0o600) + def _store_rawdata(self, data, datasource): + # Raw data is bytes, not a string + if data is None: + data = b'' + util.write_file(self._get_ipath('%s_raw' % datasource), data, 0o600) + + def _store_processeddata(self, processed_data, datasource): + # processed is a Mime message, so write as string. + if processed_data is None: + processed_data = '' + util.write_file(self._get_ipath(datasource), + str(processed_data), 0o600) def _default_handlers(self, opts=None): if opts is None: @@ -434,6 +434,11 @@ class Init(object): opts={'script_path': 'vendor_scripts', 'cloud_config_path': 'vendor_cloud_config'}) + def _default_vendordata2_handlers(self): + return self._default_handlers( + opts={'script_path': 'vendor_scripts', + 'cloud_config_path': 'vendor2_cloud_config'}) + def _do_handlers(self, data_msg, c_handlers_list, frequency, excluded=None): """ @@ -555,7 +560,12 @@ class Init(object): with events.ReportEventStack("consume-vendor-data", "reading and applying vendor-data", parent=self.reporter): - self._consume_vendordata(frequency) + self._consume_vendordata("vendordata", frequency) + + with events.ReportEventStack("consume-vendor-data2", + "reading and applying vendor-data2", + parent=self.reporter): + self._consume_vendordata("vendordata2", frequency) # Perform post-consumption adjustments so that # modules that run during the init stage reflect @@ -568,46 +578,62 @@ class Init(object): # objects before the load of the userdata happened, # this is expected. - def _consume_vendordata(self, frequency=PER_INSTANCE): + def _consume_vendordata(self, vendor_source, frequency=PER_INSTANCE): """ Consume the vendordata and run the part handlers on it """ + # User-data should have been consumed first. # So we merge the other available cloud-configs (everything except # vendor provided), and check whether or not we should consume # vendor data at all. That gives user or system a chance to override. - if not self.datasource.get_vendordata_raw(): - LOG.debug("no vendordata from datasource") - return + if vendor_source == 'vendordata': + if not self.datasource.get_vendordata_raw(): + LOG.debug("no vendordata from datasource") + return + cfg_name = 'vendor_data' + elif vendor_source == 'vendordata2': + if not self.datasource.get_vendordata2_raw(): + LOG.debug("no vendordata2 from datasource") + return + cfg_name = 'vendor_data2' + else: + raise RuntimeError("vendor_source arg must be either 'vendordata'" + " or 'vendordata2'") _cc_merger = helpers.ConfigMerger(paths=self._paths, datasource=self.datasource, additional_fns=[], base_cfg=self.cfg, include_vendor=False) - vdcfg = _cc_merger.cfg.get('vendor_data', {}) + vdcfg = _cc_merger.cfg.get(cfg_name, {}) if not isinstance(vdcfg, dict): vdcfg = {'enabled': False} - LOG.warning("invalid 'vendor_data' setting. resetting to: %s", - vdcfg) + LOG.warning("invalid %s setting. resetting to: %s", + cfg_name, vdcfg) enabled = vdcfg.get('enabled') no_handlers = vdcfg.get('disabled_handlers', None) if not util.is_true(enabled): - LOG.debug("vendordata consumption is disabled.") + LOG.debug("%s consumption is disabled.", vendor_source) return - LOG.debug("vendor data will be consumed. disabled_handlers=%s", - no_handlers) + LOG.debug("%s will be consumed. 
disabled_handlers=%s", + vendor_source, no_handlers) - # Ensure vendordata source fetched before activation (just incase) - vendor_data_msg = self.datasource.get_vendordata() + # Ensure vendordata source fetched before activation (just in case.) - # This keeps track of all the active handlers, while excluding what the - # users doesn't want run, i.e. boot_hook, cloud_config, shell_script - c_handlers_list = self._default_vendordata_handlers() + # c_handlers_list keeps track of all the active handlers, while + # excluding what the users doesn't want run, i.e. boot_hook, + # cloud_config, shell_script + if vendor_source == 'vendordata': + vendor_data_msg = self.datasource.get_vendordata() + c_handlers_list = self._default_vendordata_handlers() + else: + vendor_data_msg = self.datasource.get_vendordata2() + c_handlers_list = self._default_vendordata2_handlers() # Run the handlers self._do_handlers(vendor_data_msg, c_handlers_list, frequency, diff --git a/doc/rtd/topics/datasources/openstack.rst b/doc/rtd/topics/datasources/openstack.rst index b23b4b7c..62d0fc03 100644 --- a/doc/rtd/topics/datasources/openstack.rst +++ b/doc/rtd/topics/datasources/openstack.rst @@ -82,4 +82,12 @@ For more general information about how cloud-init handles vendor data, including how it can be disabled by users on instances, see :doc:`/topics/vendordata`. +OpenStack can also be configured to provide 'dynamic vendordata' +which is provided by the DynamicJSON provider and appears under a +different metadata path, /vendor_data2.json. + +Cloud-init will look for a ``cloud-init`` at the vendor_data2 path; if found, +settings are applied after (and, hence, overriding) the settings from static +vendor data. Both sets of vendor data can be overridden by user data. + .. vi: textwidth=78 diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index fb2b55e8..8c968ae9 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -33,11 +33,12 @@ INSTANCE_ID = "i-testing" class FakeDataSource(sources.DataSource): - def __init__(self, userdata=None, vendordata=None): + def __init__(self, userdata=None, vendordata=None, vendordata2=None): sources.DataSource.__init__(self, {}, None, None) self.metadata = {'instance-id': INSTANCE_ID} self.userdata_raw = userdata self.vendordata_raw = vendordata + self.vendordata2_raw = vendordata2 def count_messages(root): @@ -105,13 +106,14 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): self.assertEqual('qux', cc['baz']) self.assertEqual('qux2', cc['bar']) - def test_simple_jsonp_vendor_and_user(self): + def test_simple_jsonp_vendor_and_vendor2_and_user(self): # test that user-data wins over vendor user_blob = ''' #cloud-config-jsonp [ { "op": "add", "path": "/baz", "value": "qux" }, - { "op": "add", "path": "/bar", "value": "qux2" } + { "op": "add", "path": "/bar", "value": "qux2" }, + { "op": "add", "path": "/foobar", "value": "qux3" } ] ''' vendor_blob = ''' @@ -119,12 +121,23 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): [ { "op": "add", "path": "/baz", "value": "quxA" }, { "op": "add", "path": "/bar", "value": "quxB" }, - { "op": "add", "path": "/foo", "value": "quxC" } + { "op": "add", "path": "/foo", "value": "quxC" }, + { "op": "add", "path": "/corge", "value": "quxEE" } +] +''' + vendor2_blob = ''' +#cloud-config-jsonp +[ + { "op": "add", "path": "/corge", "value": "quxD" }, + { "op": "add", "path": "/grault", "value": "quxFF" }, + { "op": "add", "path": "/foobar", "value": "quxGG" } ] ''' self.reRoot() initer = 
stages.Init() - initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) + initer.datasource = FakeDataSource(user_blob, + vendordata=vendor_blob, + vendordata2=vendor2_blob) initer.read_cfg() initer.initialize() initer.fetch() @@ -138,9 +151,15 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): (_which_ran, _failures) = mods.run_section('cloud_init_modules') cfg = mods.cfg self.assertIn('vendor_data', cfg) + self.assertIn('vendor_data2', cfg) + # Confirm that vendordata2 overrides vendordata, and that + # userdata overrides both self.assertEqual('qux', cfg['baz']) self.assertEqual('qux2', cfg['bar']) + self.assertEqual('qux3', cfg['foobar']) self.assertEqual('quxC', cfg['foo']) + self.assertEqual('quxD', cfg['corge']) + self.assertEqual('quxFF', cfg['grault']) def test_simple_jsonp_no_vendor_consumed(self): # make sure that vendor data is not consumed @@ -293,6 +312,10 @@ run: vendor_blob = ''' #!/bin/bash echo "test" +''' + vendor2_blob = ''' +#!/bin/bash +echo "dynamic test" ''' user_blob = ''' @@ -303,7 +326,9 @@ vendor_data: ''' new_root = self.reRoot() initer = stages.Init() - initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) + initer.datasource = FakeDataSource(user_blob, + vendordata=vendor_blob, + vendordata2=vendor2_blob) initer.read_cfg() initer.initialize() initer.fetch() diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 415755aa..478f3503 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -40,6 +40,9 @@ USER_DATA = b'#!/bin/sh\necho This is user data\n' VENDOR_DATA = { 'magic': '', } +VENDOR_DATA2 = { + 'static': {} +} OSTACK_META = { 'availability_zone': 'nova', 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, @@ -60,6 +63,7 @@ OS_FILES = { {'links': [], 'networks': [], 'services': []}), 'openstack/latest/user_data': USER_DATA, 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA), + 'openstack/latest/vendor_data2.json': json.dumps(VENDOR_DATA2), } EC2_FILES = { 'latest/user-data': USER_DATA, @@ -142,6 +146,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) f = _read_metadata_service() self.assertEqual(VENDOR_DATA, f.get('vendordata')) + self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) self.assertEqual(2, len(f['files'])) @@ -163,6 +168,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, OS_FILES) f = _read_metadata_service() self.assertEqual(VENDOR_DATA, f.get('vendordata')) + self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) self.assertEqual(USER_DATA, f.get('userdata')) @@ -195,6 +201,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) f = _read_metadata_service() self.assertEqual(VENDOR_DATA, f.get('vendordata')) + self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) self.assertFalse(f.get('userdata')) @@ -210,6 +217,17 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(CONTENT_1, 
f['files']['/etc/bar/bar.cfg']) self.assertFalse(f.get('vendordata')) + def test_vendordata2_empty(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data2.json'): + os_files.pop(k, None) + _register_uris(self.VERSION, {}, {}, os_files) + f = _read_metadata_service() + self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) + self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) + self.assertFalse(f.get('vendordata2')) + def test_vendordata_invalid(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -218,6 +236,14 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): _register_uris(self.VERSION, {}, {}, os_files) self.assertRaises(BrokenMetadata, _read_metadata_service) + def test_vendordata2_invalid(self): + os_files = copy.deepcopy(OS_FILES) + for k in list(os_files.keys()): + if k.endswith('vendor_data2.json'): + os_files[k] = '{' # some invalid json + _register_uris(self.VERSION, {}, {}, os_files) + self.assertRaises(BrokenMetadata, _read_metadata_service) + def test_metadata_invalid(self): os_files = copy.deepcopy(OS_FILES) for k in list(os_files.keys()): @@ -246,6 +272,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(USER_DATA, ds_os.userdata_raw) self.assertEqual(2, len(ds_os.files)) self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) + self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure) self.assertIsNone(ds_os.vendordata_raw) m_dhcp.assert_not_called() @@ -278,6 +305,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertEqual(USER_DATA, ds_os_local.userdata_raw) self.assertEqual(2, len(ds_os_local.files)) self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure) + self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure) self.assertIsNone(ds_os_local.vendordata_raw) m_dhcp.assert_called_with('eth9', None) @@ -401,7 +429,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): self.assertIsNone(ds_os.vendordata_raw) self.assertEqual( ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata', - 'userdata', 'vendordata', 'version'], + 'userdata', 'vendordata', 'vendordata2', 'version'], sorted(crawled_data.keys())) self.assertEqual('local', crawled_data['dsmode']) self.assertEqual(EC2_META, crawled_data['ec2-metadata']) @@ -415,6 +443,7 @@ class TestOpenStackDataSource(test_helpers.HttprettyTestCase): crawled_data['networkdata']) self.assertEqual(USER_DATA, crawled_data['userdata']) self.assertEqual(VENDOR_DATA, crawled_data['vendordata']) + self.assertEqual(VENDOR_DATA2, crawled_data['vendordata2']) self.assertEqual(2, crawled_data['version']) @@ -681,6 +710,7 @@ class TestMetadataReader(test_helpers.HttprettyTestCase): 'version': 2, 'metadata': expected_md, 'vendordata': vendor_data, + 'vendordata2': vendor_data2, 'networkdata': network_data, 'ec2-metadata': mock_read_ec2.return_value, 'files': {}, -- cgit v1.2.3 From 0497c7b1f752c7011006b36f9c07ac141c0bb3c2 Mon Sep 17 00:00:00 2001 From: Antti Myyrä Date: Mon, 8 Feb 2021 17:24:36 +0200 Subject: Datasource for UpCloud (#743) New datasource utilizing UpCloud metadata API, including relevant unit tests and documentation. 
--- cloudinit/apport.py | 1 + cloudinit/settings.py | 1 + cloudinit/sources/DataSourceUpCloud.py | 165 +++++++++++++ cloudinit/sources/helpers/upcloud.py | 231 +++++++++++++++++ doc/rtd/topics/availability.rst | 1 + doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/upcloud.rst | 24 ++ doc/rtd/topics/network-config.rst | 5 + tests/unittests/test_datasource/test_common.py | 3 + tests/unittests/test_datasource/test_upcloud.py | 314 ++++++++++++++++++++++++ tools/ds-identify | 7 +- 11 files changed, 752 insertions(+), 1 deletion(-) create mode 100644 cloudinit/sources/DataSourceUpCloud.py create mode 100644 cloudinit/sources/helpers/upcloud.py create mode 100644 doc/rtd/topics/datasources/upcloud.rst create mode 100644 tests/unittests/test_datasource/test_upcloud.py (limited to 'cloudinit') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 9bded16c..25f254e3 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -39,6 +39,7 @@ KNOWN_CLOUD_NAMES = [ 'SAP Converged Cloud', 'Scaleway', 'SmartOS', + 'UpCloud', 'VMware', 'ZStack', 'Other' diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 7516e17b..91e1bfe7 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -41,6 +41,7 @@ CFG_BUILTIN = { 'Oracle', 'Exoscale', 'RbxCloud', + 'UpCloud', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py new file mode 100644 index 00000000..209b9672 --- /dev/null +++ b/cloudinit/sources/DataSourceUpCloud.py @@ -0,0 +1,165 @@ +# Author: Antti Myyrä +# +# This file is part of cloud-init. See LICENSE file for license information. + +# UpCloud server metadata API: +# https://developers.upcloud.com/1.3/8-servers/#metadata-service + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit import net as cloudnet +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError + + +from cloudinit.sources.helpers import upcloud as uc_helper + +LOG = logging.getLogger(__name__) + +BUILTIN_DS_CONFIG = {"metadata_url": "http://169.254.169.254/metadata/v1.json"} + +# Wait for a up to a minute, retrying the meta-data server +# every 2 seconds. +MD_RETRIES = 30 +MD_TIMEOUT = 2 +MD_WAIT_RETRY = 2 + + +class DataSourceUpCloud(sources.DataSource): + + dsname = "UpCloud" + + # We'll perform DHCP setup only in init-local, see DataSourceUpCloudLocal + perform_dhcp_setup = False + + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.distro = distro + self.metadata = dict() + self.ds_cfg = util.mergemanydict( + [ + util.get_cfg_by_path(sys_cfg, ["datasource", "UpCloud"], {}), + BUILTIN_DS_CONFIG, + ] + ) + self.metadata_address = self.ds_cfg["metadata_url"] + self.retries = self.ds_cfg.get("retries", MD_RETRIES) + self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT) + self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY) + self._network_config = None + + def _get_sysinfo(self): + return uc_helper.read_sysinfo() + + def _read_metadata(self): + return uc_helper.read_metadata( + self.metadata_address, + timeout=self.timeout, + sec_between=self.wait_retry, + retries=self.retries, + ) + + def _get_data(self): + (is_upcloud, server_uuid) = self._get_sysinfo() + + # only proceed if we know we are on UpCloud + if not is_upcloud: + return False + + LOG.info("Running on UpCloud. 
server_uuid=%s", server_uuid) + + if self.perform_dhcp_setup: # Setup networking in init-local stage. + try: + LOG.debug("Finding a fallback NIC") + nic = cloudnet.find_fallback_nic() + LOG.debug("Discovering metadata via DHCP interface %s", nic) + with EphemeralDHCPv4(nic): + md = util.log_time( + logfunc=LOG.debug, + msg="Reading from metadata service", + func=self._read_metadata, + ) + except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e: + util.logexc(LOG, str(e)) + return False + else: + try: + LOG.debug( + "Discovering metadata without DHCP-configured networking" + ) + md = util.log_time( + logfunc=LOG.debug, + msg="Reading from metadata service", + func=self._read_metadata, + ) + except sources.InvalidMetaDataException as e: + util.logexc(LOG, str(e)) + LOG.info( + "No DHCP-enabled interfaces available, " + "unable to fetch metadata for %s", + server_uuid, + ) + return False + + self.metadata_full = md + self.metadata["instance-id"] = md.get("instance_id", server_uuid) + self.metadata["local-hostname"] = md.get("hostname") + self.metadata["network"] = md.get("network") + self.metadata["public-keys"] = md.get("public_keys") + self.metadata["availability_zone"] = md.get("region", "default") + self.vendordata_raw = md.get("vendor_data", None) + self.userdata_raw = md.get("user_data", None) + + return True + + def check_instance_id(self, sys_cfg): + return sources.instance_id_matches_system_uuid(self.get_instance_id()) + + @property + def network_config(self): + """ + Configure the networking. This needs to be done each boot, + since the IP and interface information might have changed + due to reconfiguration. + """ + + if self._network_config: + return self._network_config + + raw_network_config = self.metadata.get("network") + if not raw_network_config: + raise Exception("Unable to get network meta-data from server....") + + self._network_config = uc_helper.convert_network_config( + raw_network_config, + ) + + return self._network_config + + +class DataSourceUpCloudLocal(DataSourceUpCloud): + """ + Run in init-local using a DHCP discovery prior to metadata crawl. + + In init-local, no network is available. This subclass sets up minimal + networking with dhclient on a viable nic so that it can talk to the + metadata service. If the metadata service provides network configuration + then render the network configuration for that instance based on metadata. + """ + + perform_dhcp_setup = True # Get metadata network config if present + + +# Used to match classes to dependencies +datasources = [ + (DataSourceUpCloudLocal, (sources.DEP_FILESYSTEM, )), + (DataSourceUpCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/upcloud.py b/cloudinit/sources/helpers/upcloud.py new file mode 100644 index 00000000..199baa58 --- /dev/null +++ b/cloudinit/sources/helpers/upcloud.py @@ -0,0 +1,231 @@ +# Author: Antti Myyrä +# +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +from cloudinit import dmi +from cloudinit import log as logging +from cloudinit import net as cloudnet +from cloudinit import url_helper + +LOG = logging.getLogger(__name__) + + +def convert_to_network_config_v1(config): + """ + Convert the UpCloud network metadata description into + Cloud-init's version 1 netconfig format. 
+ + Example JSON: + { + "interfaces": [ + { + "index": 1, + "ip_addresses": [ + { + "address": "94.237.105.53", + "dhcp": true, + "dns": [ + "94.237.127.9", + "94.237.40.9" + ], + "family": "IPv4", + "floating": false, + "gateway": "94.237.104.1", + "network": "94.237.104.0/22" + }, + { + "address": "94.237.105.50", + "dhcp": false, + "dns": [], + "family": "IPv4", + "floating": true, + "gateway": "", + "network": "94.237.105.50/32" + } + ], + "mac": "32:d5:ba:4a:36:e7", + "network_id": "031457f4-0f8c-483c-96f2-eccede02909c", + "type": "public" + }, + { + "index": 2, + "ip_addresses": [ + { + "address": "10.6.3.27", + "dhcp": true, + "dns": [], + "family": "IPv4", + "floating": false, + "gateway": "10.6.0.1", + "network": "10.6.0.0/22" + } + ], + "mac": "32:d5:ba:4a:84:cc", + "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1", + "type": "utility" + }, + { + "index": 3, + "ip_addresses": [ + { + "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7", + "dhcp": true, + "dns": [ + "2a04:3540:53::1", + "2a04:3544:53::1" + ], + "family": "IPv6", + "floating": false, + "gateway": "2a04:3545:1000:720::1", + "network": "2a04:3545:1000:720::/64" + } + ], + "mac": "32:d5:ba:4a:63:e7", + "network_id": "03000000-0000-4000-8046-000000000000", + "type": "public" + }, + { + "index": 4, + "ip_addresses": [ + { + "address": "172.30.1.10", + "dhcp": true, + "dns": [], + "family": "IPv4", + "floating": false, + "gateway": "172.30.1.1", + "network": "172.30.1.0/24" + } + ], + "mac": "32:d5:ba:4a:8a:e1", + "network_id": "035a0a4a-77b4-4de5-820d-189fc8135714", + "type": "private" + } + ], + "dns": [ + "94.237.127.9", + "94.237.40.9" + ] + } + """ + + def _get_subnet_config(ip_addr, dns): + if ip_addr.get("dhcp"): + dhcp_type = "dhcp" + if ip_addr.get("family") == "IPv6": + # UpCloud currently passes IPv6 addresses via + # StateLess Address Auto Configuration (SLAAC) + dhcp_type = "ipv6_dhcpv6-stateless" + return {"type": dhcp_type} + + static_type = "static" + if ip_addr.get("family") == "IPv6": + static_type = "static6" + subpart = { + "type": static_type, + "control": "auto", + "address": ip_addr.get("address"), + } + + if ip_addr.get("gateway"): + subpart["gateway"] = ip_addr.get("gateway") + + if "/" in ip_addr.get("network"): + subpart["netmask"] = ip_addr.get("network").split("/")[1] + + if dns != ip_addr.get("dns") and ip_addr.get("dns"): + subpart["dns_nameservers"] = ip_addr.get("dns") + + return subpart + + nic_configs = [] + macs_to_interfaces = cloudnet.get_interfaces_by_mac() + LOG.debug("NIC mapping: %s", macs_to_interfaces) + + for raw_iface in config.get("interfaces"): + LOG.debug("Considering %s", raw_iface) + + mac_address = raw_iface.get("mac") + if mac_address not in macs_to_interfaces: + raise RuntimeError( + "Did not find network interface on system " + "with mac '%s'. 
Cannot apply configuration: %s" + % (mac_address, raw_iface) + ) + + iface_type = raw_iface.get("type") + sysfs_name = macs_to_interfaces.get(mac_address) + + LOG.debug( + "Found %s interface '%s' with address '%s' (index %d)", + iface_type, + sysfs_name, + mac_address, + raw_iface.get("index"), + ) + + interface = { + "type": "physical", + "name": sysfs_name, + "mac_address": mac_address + } + + subnets = [] + for ip_address in raw_iface.get("ip_addresses"): + sub_part = _get_subnet_config(ip_address, config.get("dns")) + subnets.append(sub_part) + + interface["subnets"] = subnets + nic_configs.append(interface) + + if config.get("dns"): + LOG.debug("Setting DNS nameservers to %s", config.get("dns")) + nic_configs.append({ + "type": "nameserver", + "address": config.get("dns") + }) + + return {"version": 1, "config": nic_configs} + + +def convert_network_config(config): + return convert_to_network_config_v1(config) + + +def read_metadata(url, timeout=2, sec_between=2, retries=30): + response = url_helper.readurl( + url, timeout=timeout, sec_between=sec_between, retries=retries + ) + if not response.ok(): + raise RuntimeError("unable to read metadata at %s" % url) + return json.loads(response.contents.decode()) + + +def read_sysinfo(): + # UpCloud embeds vendor ID and server UUID in the + # SMBIOS information + + # Detect if we are on UpCloud and return the UUID + + vendor_name = dmi.read_dmi_data("system-manufacturer") + if vendor_name != "UpCloud": + return False, None + + server_uuid = dmi.read_dmi_data("system-uuid") + if server_uuid: + LOG.debug( + "system identified via SMBIOS as UpCloud server: %s", + server_uuid + ) + else: + msg = ( + "system identified via SMBIOS as a UpCloud server, but " + "did not provide an ID. Please contact support via" + "https://hub.upcloud.com or via email with support@upcloud.com" + ) + LOG.critical(msg) + raise RuntimeError(msg) + + return True, server_uuid diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index 8f56a7d2..f58b2b38 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -55,6 +55,7 @@ environments in the public cloud: - CloudStack - AltCloud - SmartOS +- UpCloud Additionally, cloud-init is supported on these private clouds: diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 3d026143..228173d2 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -47,6 +47,7 @@ The following is a list of documents for each supported datasource: datasources/ovf.rst datasources/rbxcloud.rst datasources/smartos.rst + datasources/upcloud.rst datasources/zstack.rst diff --git a/doc/rtd/topics/datasources/upcloud.rst b/doc/rtd/topics/datasources/upcloud.rst new file mode 100644 index 00000000..0b7a9bb0 --- /dev/null +++ b/doc/rtd/topics/datasources/upcloud.rst @@ -0,0 +1,24 @@ +.. _datasource_upcloud: + +UpCloud +============= + +The `UpCloud`_ datasource consumes information from UpCloud's `metadata +service`_. This metadata service serves information about the +running server via HTTP over the address 169.254.169.254 available in every +DHCP-configured interface. The metadata API endpoints are fully described in +UpCloud API documentation at +`https://developers.upcloud.com/1.3/8-servers/#metadata-service +`_. + +Providing user-data +------------------- + +When creating a server, user-data is provided by specifying it as `user_data` +in the API or via the server creation tool in the control panel. 
User-data is +immutable during server's lifetime and can be removed by deleting the server. + +.. _UpCloud: https://upcloud.com/ +.. _metadata service: https://upcloud.com/community/tutorials/upcloud-metadata-service/ + +.. vi: textwidth=78 diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 08db04d8..07cad765 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -144,6 +144,10 @@ The following Datasources optionally provide network configuration: - `SmartOS JSON Metadata`_ +- :ref:`datasource_upcloud` + + - `UpCloud JSON metadata`_ + For more information on network configuration formats .. toctree:: @@ -257,5 +261,6 @@ Example output converting V2 to sysconfig: .. _DigitalOcean JSON metadata: https://developers.digitalocean.com/documentation/metadata/#network-interfaces-index .. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html .. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html +.. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service .. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 4ab5d471..5912f7ee 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -27,6 +27,7 @@ from cloudinit.sources import ( DataSourceRbxCloud as RbxCloud, DataSourceScaleway as Scaleway, DataSourceSmartOS as SmartOS, + DataSourceUpCloud as UpCloud, ) from cloudinit.sources import DataSourceNone as DSNone @@ -48,6 +49,7 @@ DEFAULT_LOCAL = [ OpenStack.DataSourceOpenStackLocal, RbxCloud.DataSourceRbxCloud, Scaleway.DataSourceScaleway, + UpCloud.DataSourceUpCloudLocal, ] DEFAULT_NETWORK = [ @@ -63,6 +65,7 @@ DEFAULT_NETWORK = [ NoCloud.DataSourceNoCloudNet, OpenStack.DataSourceOpenStack, OVF.DataSourceOVFNet, + UpCloud.DataSourceUpCloud, ] diff --git a/tests/unittests/test_datasource/test_upcloud.py b/tests/unittests/test_datasource/test_upcloud.py new file mode 100644 index 00000000..cec48b4b --- /dev/null +++ b/tests/unittests/test_datasource/test_upcloud.py @@ -0,0 +1,314 @@ +# Author: Antti Myyrä +# +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +from cloudinit import helpers +from cloudinit import settings +from cloudinit import sources +from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \ + DataSourceUpCloudLocal + +from cloudinit.tests.helpers import mock, CiTestCase + +UC_METADATA = json.loads(""" +{ + "cloud_name": "upcloud", + "instance_id": "00322b68-0096-4042-9406-faad61922128", + "hostname": "test.example.com", + "platform": "servers", + "subplatform": "metadata (http://169.254.169.254)", + "public_keys": [ + "ssh-rsa AAAAB.... test1@example.com", + "ssh-rsa AAAAB.... 
test2@example.com" + ], + "region": "fi-hel2", + "network": { + "interfaces": [ + { + "index": 1, + "ip_addresses": [ + { + "address": "94.237.105.53", + "dhcp": true, + "dns": [ + "94.237.127.9", + "94.237.40.9" + ], + "family": "IPv4", + "floating": false, + "gateway": "94.237.104.1", + "network": "94.237.104.0/22" + }, + { + "address": "94.237.105.50", + "dhcp": false, + "dns": null, + "family": "IPv4", + "floating": true, + "gateway": "", + "network": "94.237.105.50/32" + } + ], + "mac": "3a:d6:ba:4a:36:e7", + "network_id": "031457f4-0f8c-483c-96f2-eccede02909c", + "type": "public" + }, + { + "index": 2, + "ip_addresses": [ + { + "address": "10.6.3.27", + "dhcp": true, + "dns": null, + "family": "IPv4", + "floating": false, + "gateway": "10.6.0.1", + "network": "10.6.0.0/22" + } + ], + "mac": "3a:d6:ba:4a:84:cc", + "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1", + "type": "utility" + }, + { + "index": 3, + "ip_addresses": [ + { + "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7", + "dhcp": true, + "dns": [ + "2a04:3540:53::1", + "2a04:3544:53::1" + ], + "family": "IPv6", + "floating": false, + "gateway": "2a04:3545:1000:720::1", + "network": "2a04:3545:1000:720::/64" + } + ], + "mac": "3a:d6:ba:4a:63:e7", + "network_id": "03000000-0000-4000-8046-000000000000", + "type": "public" + }, + { + "index": 4, + "ip_addresses": [ + { + "address": "172.30.1.10", + "dhcp": true, + "dns": null, + "family": "IPv4", + "floating": false, + "gateway": "172.30.1.1", + "network": "172.30.1.0/24" + } + ], + "mac": "3a:d6:ba:4a:8a:e1", + "network_id": "035a0a4a-7704-4de5-820d-189fc8132714", + "type": "private" + } + ], + "dns": [ + "94.237.127.9", + "94.237.40.9" + ] + }, + "storage": { + "disks": [ + { + "id": "014efb65-223b-4d44-8f0a-c29535b88dcf", + "serial": "014efb65223b4d448f0a", + "size": 10240, + "type": "disk", + "tier": "maxiops" + } + ] + }, + "tags": [], + "user_data": "", + "vendor_data": "" +} +""") + +UC_METADATA["user_data"] = b"""#cloud-config +runcmd: +- [touch, /root/cloud-init-worked ] +""" + +MD_URL = 'http://169.254.169.254/metadata/v1.json' + + +def _mock_dmi(): + return True, "00322b68-0096-4042-9406-faad61922128" + + +class TestUpCloudMetadata(CiTestCase): + """ + Test reading the meta-data + """ + def setUp(self): + super(TestUpCloudMetadata, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceUpCloud( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + if get_sysinfo: + ds._get_sysinfo = get_sysinfo + return ds + + @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo') + def test_returns_false_not_on_upcloud(self, m_read_sysinfo): + m_read_sysinfo.return_value = (False, None) + ds = self.get_ds(get_sysinfo=None) + self.assertEqual(False, ds.get_data()) + self.assertTrue(m_read_sysinfo.called) + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + def test_metadata(self, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + ds = self.get_ds() + ds.perform_dhcp_setup = False + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw()) + self.assertEqual(UC_METADATA.get('vendor_data'), + ds.get_vendordata_raw()) + self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) + self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + + self.assertEqual(UC_METADATA.get('public_keys'), + 
ds.get_public_ssh_keys()) + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + +class TestUpCloudNetworkSetup(CiTestCase): + """ + Test reading the meta-data on networked context + """ + + def setUp(self): + super(TestUpCloudNetworkSetup, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceUpCloudLocal( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + if get_sysinfo: + ds._get_sysinfo = get_sysinfo + return ds + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + def test_network_configured_metadata(self, m_net, m_dhcp, + m_fallback_nic, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + m_fallback_nic.return_value = 'eth1' + m_dhcp.return_value = [{ + 'interface': 'eth1', 'fixed-address': '10.6.3.27', + 'routers': '10.6.0.1', 'subnet-mask': '22', + 'broadcast-address': '10.6.3.255'} + ] + + ds = self.get_ds() + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(m_dhcp.called) + m_dhcp.assert_called_with('eth1', None) + + m_net.assert_called_once_with( + broadcast='10.6.3.255', interface='eth1', + ip='10.6.3.27', prefix_or_mask='22', + router='10.6.0.1', static_routes=None + ) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) + self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_network_configuration(self, m_get_by_mac, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + raw_ifaces = UC_METADATA.get('network').get('interfaces') + self.assertEqual(4, len(raw_ifaces)) + + m_get_by_mac.return_value = { + raw_ifaces[0].get('mac'): 'eth0', + raw_ifaces[1].get('mac'): 'eth1', + raw_ifaces[2].get('mac'): 'eth2', + raw_ifaces[3].get('mac'): 'eth3', + } + + ds = self.get_ds() + ds.perform_dhcp_setup = False + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + netcfg = ds.network_config + + self.assertEqual(1, netcfg.get('version')) + + config = netcfg.get('config') + self.assertIsInstance(config, list) + self.assertEqual(5, len(config)) + self.assertEqual('physical', config[3].get('type')) + + self.assertEqual(raw_ifaces[2].get('mac'), config[2] + .get('mac_address')) + self.assertEqual(1, len(config[2].get('subnets'))) + self.assertEqual('ipv6_dhcpv6-stateless', config[2].get('subnets')[0] + .get('type')) + + self.assertEqual(2, len(config[0].get('subnets'))) + self.assertEqual('static', config[0].get('subnets')[1].get('type')) + + dns = config[4] + self.assertEqual('nameserver', dns.get('type')) + self.assertEqual(2, len(dns.get('address'))) + self.assertEqual( + UC_METADATA.get('network').get('dns')[1], + dns.get('address')[1] + ) + + +class TestUpCloudDatasourceLoading(CiTestCase): + def test_get_datasource_list_returns_in_local(self): + deps = (sources.DEP_FILESYSTEM, ) + ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceUpCloudLocal]) + + def test_get_datasource_list_returns_in_normal(self): + deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK) + ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) + 
self.assertEqual(ds_list,
+                         [DataSourceUpCloud])
+
+    def test_list_sources_finds_ds(self):
+        found = sources.list_sources(
+            ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK),
+            ['cloudinit.sources'])
+        self.assertEqual([DataSourceUpCloud],
+                         found)
+
+# vi: ts=4 expandtab
diff --git a/tools/ds-identify b/tools/ds-identify
index 496dbb8a..2f2486f7 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -125,7 +125,7 @@ DI_DSNAME=""
 # be searched if there is no setting found in config.
 DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
 CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
-OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud"
+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud"
 DI_DSLIST=""
 DI_MODE=""
 DI_ON_FOUND=""
@@ -883,6 +883,11 @@ dscheck_RbxCloud() {
     return ${DS_NOT_FOUND}
 }
 
+dscheck_UpCloud() {
+    dmi_sys_vendor_is UpCloud && return ${DS_FOUND}
+    return ${DS_NOT_FOUND}
+}
+
 ovf_vmware_guest_customization() {
     # vmware guest customization
-- 
cgit v1.2.3


From 66e2d42dd1b722dc8e59f4e5990cea54f81ccd2a Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Fri, 19 Feb 2021 15:37:57 -0700
Subject: azure: case-insensitive UUID to avoid new IID during kernel upgrade
 (#798)

Kernels newer than 4.15 present /sys/dmi/id/product_uuid as a
lowercase value. Previously UUID was uppercase.

The Azure datasource reads the product_uuid directly as its platform's
instance-id. This presents a problem if a kernel is either upgraded or
downgraded across the 4.15 kernel version boundary, because the case of
the UUID will change, resulting in cloud-init seeing a "new" instance
id and re-running all modules.

Re-running cc_ssh in cloud-init deletes and regenerates ssh_host keys
on a system, which can cause concern on long-running instances that
something nefarious has happened.

Also add:
- An integration test for this for Azure Bionic Ubuntu FIPS upgrading from
  a FIPS kernel with uppercase UUID to a lowercase UUID in linux-azure
- A new pytest.mark.sru_next to collect all integration tests related to
  our next SRU

LP: #1835584
---
 cloudinit/sources/DataSourceAzure.py           |  12 ++-
 tests/integration_tests/bugs/test_lp1835584.py | 104 +++++++++++++++++++++++++
 tests/integration_tests/instances.py           |   6 +-
 tests/unittests/test_datasource/test_azure.py  |  39 ++++++++--
 tox.ini                                        |   1 +
 5 files changed, 153 insertions(+), 9 deletions(-)
 create mode 100644 tests/integration_tests/bugs/test_lp1835584.py

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 090dd66b..748a9716 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -683,10 +683,18 @@ class DataSourceAzure(sources.DataSource):
     def _iid(self, previous=None):
         prev_iid_path = os.path.join(
             self.paths.get_cpath('data'), 'instance-id')
-        iid = dmi.read_dmi_data('system-uuid')
+        # Older kernels than 4.15 will have UPPERCASE product_uuid.
+        # We don't want Azure to react to an UPPER/lower difference as a new
+        # instance id as it rewrites SSH host keys.
+        # LP: #1835584
+        iid = dmi.read_dmi_data('system-uuid').lower()
         if os.path.exists(prev_iid_path):
             previous = util.load_file(prev_iid_path).strip()
-            if is_byte_swapped(previous, iid):
+            if previous.lower() == iid:
+                # If uppercase/lowercase equivalent, return the previous value
+                # to avoid new instance id.
+                return previous
+            if is_byte_swapped(previous.lower(), iid):
                 return previous
         return iid
diff --git a/tests/integration_tests/bugs/test_lp1835584.py b/tests/integration_tests/bugs/test_lp1835584.py
new file mode 100644
index 00000000..660d2a2a
--- /dev/null
+++ b/tests/integration_tests/bugs/test_lp1835584.py
@@ -0,0 +1,104 @@
+""" Integration test for LP #1835584
+
+Upstream linux kernels prior to 4.15 provide DMI product_uuid in uppercase.
+More recent kernels switched to lowercase for DMI product_uuid. The Azure
+datasource uses this product_uuid as the instance-id for cloud-init.
+
+The linux-azure-fips kernel installed in PRO FIPS images reports the product
+UUID in uppercase, whereas the linux-azure cloud-optimized kernel reports it
+in lowercase.
+
+In cases where product_uuid changes case, ensure cloud-init doesn't
+recreate ssh hostkeys across reboot (due to detecting an instance_id change).
+
+This currently only affects linux-azure-fips -> linux-azure on Bionic.
+This test won't run on Xenial because both linux-azure-fips and linux-azure
+report uppercase product_uuids.
+
+The test will launch a specific Bionic Ubuntu PRO FIPS image which has a
+linux-azure-fips kernel known to report product_uuid as uppercase. Then
+upgrade and reboot into the linux-azure kernel, which is known to report
+product_uuid as lowercase.
+
+Across the reboot, assert that we didn't re-run config_ssh by virtue of
+seeing only one semaphore creation log entry of type:
+
+    Writing to /var/lib/cloud/instances//sem/config_ssh -
+
+https://bugs.launchpad.net/cloud-init/+bug/1835584
+"""
+import re
+
+import pytest
+
+from tests.integration_tests.instances import IntegrationAzureInstance
+from tests.integration_tests.clouds import (
+    ImageSpecification, IntegrationCloud
+)
+from tests.integration_tests.conftest import get_validated_source
+
+
+IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC = (
+    "Canonical:0001-com-ubuntu-pro-bionic-fips:pro-fips-18_04:18.04.202010201"
+)
+
+
+def _check_iid_insensitive_across_kernel_upgrade(
+    instance: IntegrationAzureInstance
+):
+    uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+    assert uuid.isupper(), (
+        "Expected uppercase UUID on Ubuntu FIPS image {}".format(
+            uuid
+        )
+    )
+    orig_kernel = instance.execute("uname -r").strip()
+    assert "azure-fips" in orig_kernel
+    result = instance.execute("apt-get update")
+    # Install a 5.4+ kernel which provides lowercase product_uuid
+    result = instance.execute("apt-get install linux-azure --assume-yes")
+    if not result.ok:
+        pytest.fail("Unable to install linux-azure kernel: {}".format(result))
+    instance.restart()
+    new_kernel = instance.execute("uname -r").strip()
+    assert orig_kernel != new_kernel
+    assert "azure-fips" not in new_kernel
+    assert "azure" in new_kernel
+    new_uuid = instance.read_from_file("/sys/class/dmi/id/product_uuid")
+    assert (
+        uuid.lower() == new_uuid
+    ), "Expected UUID on linux-azure to be lowercase of FIPS: {}".format(uuid)
+    log = instance.read_from_file("/var/log/cloud-init.log")
+    RE_CONFIG_SSH_SEMAPHORE = r"Writing.*sem/config_ssh "
+    ssh_runs = len(re.findall(RE_CONFIG_SSH_SEMAPHORE, log))
+    assert 1 == ssh_runs, "config_ssh ran too many times {}".format(ssh_runs)
+
+
+@pytest.mark.azure
+@pytest.mark.sru_next
+def test_azure_kernel_upgrade_case_insensitive_uuid(
+    session_cloud: IntegrationCloud
+):
+    cfg_image_spec = ImageSpecification.from_os_image()
+    if (cfg_image_spec.os, cfg_image_spec.release) != ("ubuntu", "bionic"):
+        pytest.skip(
+            "Test only supports ubuntu:bionic not 
{0.os}:{0.release}".format( + cfg_image_spec + ) + ) + source = get_validated_source(session_cloud) + if not source.installs_new_version(): + pytest.skip( + "Provide CLOUD_INIT_SOURCE to install expected working cloud-init" + ) + image_id = IMG_AZURE_UBUNTU_PRO_FIPS_BIONIC + with session_cloud.launch( + launch_kwargs={"image_id": image_id} + ) as instance: + # We can't use setup_image fixture here because we want to avoid + # taking a snapshot or cleaning the booted machine after cloud-init + # upgrade. + instance.install_new_cloud_init( + source, take_snapshot=False, clean=False + ) + _check_iid_insensitive_across_kernel_upgrade(instance) diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 0d9852c3..055ec758 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -116,7 +116,8 @@ class IntegrationInstance: def install_new_cloud_init( self, source: CloudInitSource, - take_snapshot=True + take_snapshot=True, + clean=True, ): if source == CloudInitSource.DEB_PACKAGE: self.install_deb() @@ -133,7 +134,8 @@ class IntegrationInstance: ) version = self.execute('cloud-init -v').split()[-1] log.info('Installed cloud-init version: %s', version) - self.instance.clean() + if clean: + self.instance.clean() if take_snapshot: snapshot_id = self.snapshot() self.cloud.snapshot_id = snapshot_id diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index dc615309..152a2e1a 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -201,6 +201,7 @@ IMDS_NETWORK_METADATA = { } MOCKPATH = 'cloudinit.sources.DataSourceAzure.' +EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8' class TestParseNetworkConfig(CiTestCase): @@ -630,7 +631,7 @@ scbus-1 on xpt0 bus 0 return dsaz def _get_ds(self, data, agent_command=None, distro='ubuntu', - apply_network=None): + apply_network=None, instance_id=None): def dsdevs(): return data.get('dsdevs', []) @@ -659,7 +660,10 @@ scbus-1 on xpt0 bus 0 self.m_ephemeral_dhcpv4 = mock.MagicMock() self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock() - self.instance_id = 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' + if instance_id: + self.instance_id = instance_id + else: + self.instance_id = EXAMPLE_UUID def _dmi_mocks(key): if key == 'system-uuid': @@ -910,7 +914,7 @@ scbus-1 on xpt0 bus 0 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, 'imds': NETWORK_METADATA, - 'instance-id': 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', + 'instance-id': EXAMPLE_UUID, 'local-hostname': u'myhost', 'random_seed': 'wild'} @@ -1613,6 +1617,32 @@ scbus-1 on xpt0 bus 0 self.assertTrue(ret) self.assertEqual('value', dsrc.metadata['test']) + def test_instance_id_case_insensitive(self): + """Return the previous iid when current is a case-insensitive match.""" + lower_iid = EXAMPLE_UUID.lower() + upper_iid = EXAMPLE_UUID.upper() + # lowercase current UUID + ds = self._get_ds( + {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid + ) + # UPPERCASE previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + upper_iid) + ds.get_data() + self.assertEqual(upper_iid, ds.metadata['instance-id']) + + # UPPERCASE current UUID + ds = self._get_ds( + {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid + ) + # lowercase previous + write_file( + os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), + lower_iid) + ds.get_data() + self.assertEqual(lower_iid, 
ds.metadata['instance-id']) + def test_instance_id_endianness(self): """Return the previous iid when dmi uuid is the byteswapped iid.""" ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) @@ -1628,8 +1658,7 @@ scbus-1 on xpt0 bus 0 os.path.join(self.paths.cloud_dir, 'data', 'instance-id'), '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8') ds.get_data() - self.assertEqual( - 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8', ds.metadata['instance-id']) + self.assertEqual(self.instance_id, ds.metadata['instance-id']) def test_instance_id_from_dmidecode_used(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) diff --git a/tox.ini b/tox.ini index b359bb98..1887e582 100644 --- a/tox.ini +++ b/tox.ini @@ -184,5 +184,6 @@ markers = instance_name: the name to be used for the test instance sru_2020_11: test is part of the 2020/11 SRU verification sru_2021_01: test is part of the 2021/01 SRU verification + sru_next: test is part of the next SRU verification ubuntu: this test should run on Ubuntu unstable: skip this test because it is flakey -- cgit v1.2.3 From e384a5436560c9494118f0999c314982d4912d27 Mon Sep 17 00:00:00 2001 From: Michael Hudson-Doyle Date: Tue, 23 Feb 2021 08:20:46 +1300 Subject: cc_keys_to_console: add option to disable key emission (#811) Specifically: ssh: emit_keys_to_console: false We also port the cc_keys_to_console cloud tests to the new integration testing framework, and add a test for this new option. LP: #1915460 --- cloudinit/config/cc_keys_to_console.py | 5 +++ cloudinit/config/tests/test_keys_to_console.py | 34 +++++++++++++++ doc/examples/cloud-config-ssh-keys.txt | 10 +++++ .../modules/test_keys_to_console.py | 48 ++++++++++++++++++++++ 4 files changed, 97 insertions(+) create mode 100644 cloudinit/config/tests/test_keys_to_console.py create mode 100644 tests/integration_tests/modules/test_keys_to_console.py (limited to 'cloudinit') diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index 0f2be52b..646d1f67 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -51,6 +51,11 @@ def _get_helper_tool_path(distro): def handle(name, cfg, cloud, log, _args): + if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)): + log.debug(("Skipping module named %s, " + "logging of SSH host keys disabled"), name) + return + helper_path = _get_helper_tool_path(cloud.distro) if not os.path.exists(helper_path): log.warning(("Unable to activate module %s," diff --git a/cloudinit/config/tests/test_keys_to_console.py b/cloudinit/config/tests/test_keys_to_console.py new file mode 100644 index 00000000..4083fc54 --- /dev/null +++ b/cloudinit/config/tests/test_keys_to_console.py @@ -0,0 +1,34 @@ +"""Tests for cc_keys_to_console.""" +from unittest import mock + +import pytest + +from cloudinit.config import cc_keys_to_console + + +class TestHandle: + """Tests for cloudinit.config.cc_keys_to_console.handle. + + TODO: These tests only cover the emit_keys_to_console config option, they + should be expanded to cover the full functionality. 
+ """ + + @mock.patch("cloudinit.config.cc_keys_to_console.util.multi_log") + @mock.patch("cloudinit.config.cc_keys_to_console.os.path.exists") + @mock.patch("cloudinit.config.cc_keys_to_console.subp.subp") + @pytest.mark.parametrize("cfg,subp_called", [ + ({}, True), # Default to emitting keys + ({"ssh": {}}, True), # Default even if we have the parent key + ({"ssh": {"emit_keys_to_console": True}}, True), # Explicitly enabled + ({"ssh": {"emit_keys_to_console": False}}, False), # Disabled + ]) + def test_emit_keys_to_console_config( + self, m_subp, m_path_exists, _m_multi_log, cfg, subp_called + ): + # Ensure we always find the helper + m_path_exists.return_value = True + m_subp.return_value = ("", "") + + cc_keys_to_console.handle("name", cfg, mock.Mock(), mock.Mock(), ()) + + assert subp_called == (m_subp.call_count == 1) diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt index aad8b683..bfe5ab44 100644 --- a/doc/examples/cloud-config-ssh-keys.txt +++ b/doc/examples/cloud-config-ssh-keys.txt @@ -42,3 +42,13 @@ ssh_keys: -----END DSA PRIVATE KEY----- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost + +# By default, the fingerprints of the authorized keys for the users +# cloud-init adds are printed to the console. Setting +# no_ssh_fingerprints to true suppresses this output. +no_ssh_fingerprints: false + +# By default, (most) ssh host keys are printed to the console. Setting +# emit_keys_to_console to false suppresses this output. +ssh: + emit_keys_to_console: false diff --git a/tests/integration_tests/modules/test_keys_to_console.py b/tests/integration_tests/modules/test_keys_to_console.py new file mode 100644 index 00000000..298c9e6d --- /dev/null +++ b/tests/integration_tests/modules/test_keys_to_console.py @@ -0,0 +1,48 @@ +"""Integration tests for the cc_keys_to_console module. 
+
+(This is ported from
+``tests/cloud_tests/testcases/modules/keys_to_console.yaml``.)"""
+import pytest
+
+BLACKLIST_USER_DATA = """\
+#cloud-config
+ssh_fp_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+ssh_key_console_blacklist: [ssh-dss, ssh-dsa, ecdsa-sha2-nistp256]
+"""
+
+DISABLED_USER_DATA = """\
+#cloud-config
+ssh:
+  emit_keys_to_console: false
+"""
+
+
+@pytest.mark.user_data(BLACKLIST_USER_DATA)
+class TestKeysToConsoleBlacklist:
+    """Test that the blacklist options work as expected."""
+    @pytest.mark.parametrize("key_type", ["DSA", "ECDSA"])
+    def test_excluded_keys(self, class_client, key_type):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "({})".format(key_type) not in syslog
+
+    @pytest.mark.parametrize("key_type", ["ED25519", "RSA"])
+    def test_included_keys(self, class_client, key_type):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "({})".format(key_type) in syslog
+
+
+@pytest.mark.user_data(DISABLED_USER_DATA)
+class TestKeysToConsoleDisabled:
+    """Test that output can be fully disabled."""
+    @pytest.mark.parametrize("key_type", ["DSA", "ECDSA", "ED25519", "RSA"])
+    def test_keys_excluded(self, class_client, key_type):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "({})".format(key_type) not in syslog
+
+    def test_header_excluded(self, class_client):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "BEGIN SSH HOST KEY FINGERPRINTS" not in syslog
+
+    def test_footer_excluded(self, class_client):
+        syslog = class_client.read_from_file("/var/log/syslog")
+        assert "END SSH HOST KEY FINGERPRINTS" not in syslog
-- 
cgit v1.2.3


From a64b73808857fa7b4f262422ce2c87eedbce10d5 Mon Sep 17 00:00:00 2001
From: Johnson Shi
Date: Mon, 22 Feb 2021 13:50:59 -0800
Subject: Azure: Support for VMs without ephemeral resource disks. (#800)

Changes:
* Only merge in default Azure cloud ephemeral disk configs
  during DataSourceAzure._get_data() if the ephemeral disk exists.
* DataSourceAzure.address_ephemeral_resize() (which is invoked in
  DataSourceAzure.activate()) should only set up the ephemeral disk
  if the disk exists.

Azure VMs may or may not come with ephemeral resource disks depending
on the VM SKU. For VM SKUs that come with ephemeral resource disks, the
Azure platform guarantees that the ephemeral resource disk is attached
to the VM before the VM is booted. For VM SKUs that do not come with
ephemeral resource disks, cloud-init currently attempts to wait for and
set up a non-existent ephemeral resource disk, which wastes boot time.
It also causes disk setup modules to fail (due to non-existent
references to the ephemeral resource disk).

udevadm settle is invoked by cloud-init very early in boot, before
DataSourceAzure's _get_data() and activate() methods run. Within those
methods, the ephemeral resource disk path should therefore exist if the
VM SKU comes with an ephemeral resource disk, and should not exist if
the VM SKU does not come with one.
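In short, merging the ephemeral-disk defaults becomes conditional on a
plain path check (a condensed sketch of the control flow in the patch
below; the diagnostic logging is omitted here):

    # In _get_data(): merge the ephemeral-disk defaults only when the
    # resource disk device node is actually present.
    if os.path.exists(RESOURCE_DISK_PATH):
        self.cfg = util.mergemanydict(
            [crawled_data['cfg'], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG])
    else:
        self.cfg = crawled_data['cfg']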
LP: #1901011 --- cloudinit/sources/DataSourceAzure.py | 53 +++++++++++++---------- integration-requirements.txt | 2 +- tests/integration_tests/bugs/test_lp1901011.py | 58 ++++++++++++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 52 +++++++++++++++++------ 4 files changed, 130 insertions(+), 35 deletions(-) create mode 100644 tests/integration_tests/bugs/test_lp1901011.py (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 748a9716..cee630f7 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -270,7 +270,7 @@ BUILTIN_DS_CONFIG = { } # RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False -BUILTIN_CLOUD_CONFIG = { +BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = { 'disk_setup': { 'ephemeral0': {'table_type': 'gpt', 'layout': [100], @@ -618,8 +618,26 @@ class DataSourceAzure(sources.DataSource): maybe_remove_ubuntu_network_config_scripts() # Process crawled data and augment with various config defaults - self.cfg = util.mergemanydict( - [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG]) + + # Only merge in default cloud config related to the ephemeral disk + # if the ephemeral disk exists + devpath = RESOURCE_DISK_PATH + if os.path.exists(devpath): + report_diagnostic_event( + "Ephemeral resource disk '%s' exists. " + "Merging default Azure cloud ephemeral disk configs." + % devpath, + logger_func=LOG.debug) + self.cfg = util.mergemanydict( + [crawled_data['cfg'], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG]) + else: + report_diagnostic_event( + "Ephemeral resource disk '%s' does not exist. " + "Not merging default Azure cloud ephemeral disk configs." + % devpath, + logger_func=LOG.debug) + self.cfg = crawled_data['cfg'] + self._metadata_imds = crawled_data['metadata']['imds'] self.metadata = util.mergemanydict( [crawled_data['metadata'], DEFAULT_METADATA]) @@ -1468,26 +1486,17 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): @azure_ds_telemetry_reporter -def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120, +def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, is_new_instance=False, preserve_ntfs=False): - # wait for ephemeral disk to come up - naplen = .2 - with events.ReportEventStack( - name="wait-for-ephemeral-disk", - description="wait for ephemeral disk", - parent=azure_ds_reporter - ): - missing = util.wait_for_files([devpath], - maxwait=maxwait, - naplen=naplen, - log_pre="Azure ephemeral disk: ") - - if missing: - report_diagnostic_event( - "ephemeral device '%s' did not appear after %d seconds." % - (devpath, maxwait), - logger_func=LOG.warning) - return + if not os.path.exists(devpath): + report_diagnostic_event( + "Ephemeral resource disk '%s' does not exist." % devpath, + logger_func=LOG.debug) + return + else: + report_diagnostic_event( + "Ephemeral resource disk '%s' exists." 
% devpath, + logger_func=LOG.debug) result = False msg = None diff --git a/integration-requirements.txt b/integration-requirements.txt index c64b3b26..6b596426 100644 --- a/integration-requirements.txt +++ b/integration-requirements.txt @@ -1,5 +1,5 @@ # PyPI requirements for cloud-init integration testing # https://cloudinit.readthedocs.io/en/latest/topics/integration_tests.html # -pycloudlib @ git+https://github.com/canonical/pycloudlib.git@3a6c668fed769f00d83d1e6bea7d68953787cc38 +pycloudlib @ git+https://github.com/canonical/pycloudlib.git@da8445325875674394ffd85aaefaa3d2d0e0020d pytest diff --git a/tests/integration_tests/bugs/test_lp1901011.py b/tests/integration_tests/bugs/test_lp1901011.py new file mode 100644 index 00000000..2b47f0a8 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1901011.py @@ -0,0 +1,58 @@ +"""Integration test for LP: #1901011 + +Ensure an ephemeral disk exists after boot. + +See https://github.com/canonical/cloud-init/pull/800 +""" +import pytest + +from tests.integration_tests.clouds import IntegrationCloud + + +@pytest.mark.azure +@pytest.mark.parametrize('instance_type,is_ephemeral', [ + ('Standard_DS1_v2', True), + ('Standard_D2s_v4', False), +]) +def test_ephemeral(instance_type, is_ephemeral, + session_cloud: IntegrationCloud, setup_image): + if is_ephemeral: + expected_log = ( + "Ephemeral resource disk '/dev/disk/cloud/azure_resource' exists. " + "Merging default Azure cloud ephemeral disk configs." + ) + else: + expected_log = ( + "Ephemeral resource disk '/dev/disk/cloud/azure_resource' does " + "not exist. Not merging default Azure cloud ephemeral disk " + "configs." + ) + + with session_cloud.launch( + launch_kwargs={'instance_type': instance_type} + ) as client: + # Verify log file + log = client.read_from_file('/var/log/cloud-init.log') + assert expected_log in log + + # Verify devices + dev_links = client.execute('ls /dev/disk/cloud') + assert 'azure_root' in dev_links + assert 'azure_root-part1' in dev_links + if is_ephemeral: + assert 'azure_resource' in dev_links + assert 'azure_resource-part1' in dev_links + + # Verify mounts + blks = client.execute('lsblk -pPo NAME,TYPE,MOUNTPOINT') + root_device = client.execute( + 'realpath /dev/disk/cloud/azure_root-part1' + ) + assert 'NAME="{}" TYPE="part" MOUNTPOINT="/"'.format( + root_device) in blks + if is_ephemeral: + ephemeral_device = client.execute( + 'realpath /dev/disk/cloud/azure_resource-part1' + ) + assert 'NAME="{}" TYPE="part" MOUNTPOINT="/mnt"'.format( + ephemeral_device) in blks diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 152a2e1a..f597c723 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1354,23 +1354,51 @@ scbus-1 on xpt0 bus 0 for mypk in mypklist: self.assertIn(mypk['value'], dsrc.metadata['public-keys']) - def test_default_ephemeral(self): - # make sure the ephemeral device works + def test_default_ephemeral_configs_ephemeral_exists(self): + # make sure the ephemeral configs are correct if disk present odata = {} data = {'ovfcontent': construct_valid_ovf_env(data=odata), 'sys_cfg': {}} - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - cfg = dsrc.get_config_obj() + orig_exists = dsaz.os.path.exists + + def changed_exists(path): + return True if path == dsaz.RESOURCE_DISK_PATH else orig_exists( + path) + + with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists): + dsrc = self._get_ds(data) + ret = 
dsrc.get_data() + self.assertTrue(ret) + cfg = dsrc.get_config_obj() + + self.assertEqual(dsrc.device_name_to_device("ephemeral0"), + dsaz.RESOURCE_DISK_PATH) + assert 'disk_setup' in cfg + assert 'fs_setup' in cfg + self.assertIsInstance(cfg['disk_setup'], dict) + self.assertIsInstance(cfg['fs_setup'], list) + + def test_default_ephemeral_configs_ephemeral_does_not_exist(self): + # make sure the ephemeral configs are correct if disk not present + odata = {} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + orig_exists = dsaz.os.path.exists + + def changed_exists(path): + return False if path == dsaz.RESOURCE_DISK_PATH else orig_exists( + path) + + with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists): + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + cfg = dsrc.get_config_obj() - self.assertEqual(dsrc.device_name_to_device("ephemeral0"), - dsaz.RESOURCE_DISK_PATH) - assert 'disk_setup' in cfg - assert 'fs_setup' in cfg - self.assertIsInstance(cfg['disk_setup'], dict) - self.assertIsInstance(cfg['fs_setup'], list) + assert 'disk_setup' not in cfg + assert 'fs_setup' not in cfg def test_provide_disk_aliases(self): # Make sure that user can affect disk aliases -- cgit v1.2.3 From d873b9dcf30cdc619655dc7ea37c264ca7ba1845 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 23 Feb 2021 09:41:41 -0500 Subject: Release 21.1 (#820) Bump the version in cloudinit/version.py to 21.1 and update ChangeLog. LP: #1916540 --- ChangeLog | 107 +++++++++++++++++++++++++++++++++++++++++++++++++++ cloudinit/version.py | 2 +- 2 files changed, 108 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index d0781ded..44b50410 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,110 @@ +21.1 + - Azure: Support for VMs without ephemeral resource disks. 
(#800) + [Johnson Shi] (LP: #1901011) + - cc_keys_to_console: add option to disable key emission (#811) + [Michael Hudson-Doyle] (LP: #1915460) + - integration_tests: introduce lxd_use_exec mark (#802) + - azure: case-insensitive UUID to avoid new IID during kernel upgrade + (#798) (LP: #1835584) + - stale.yml: don't ask submitters to reopen PRs (#816) + - integration_tests: fix use of SSH agent within tox (#815) + - integration_tests: add UPGRADE CloudInitSource (#812) + - integration_tests: use unique MAC addresses for tests (#813) + - Update .gitignore (#814) + - Port apt cloud_tests to integration tests (#808) + - integration_tests: fix test_gh626 on LXD VMs (#809) + - Fix attempting to decode binary data in test_seed_random_data test (#806) + - Remove wait argument from tests with session_cloud calls (#805) + - Datasource for UpCloud (#743) [Antti Myyrä] + - test_gh668: fix failure on LXD VMs (#801) + - openstack: read the dynamic metadata group vendor_data2.json (#777) + [Andrew Bogott] (LP: #1841104) + - includedir in suoders can be prefixed by "arroba" (#783) + [Jordi Massaguer Pla] + - [VMware] change default max wait time to 15s (#774) [xiaofengw-vmware] + - Revert integration test associated with reverted #586 (#784) + - Add jordimassaguerpla as contributor (#787) [Jordi Massaguer Pla] + - Add Rick Harding to CLA signers (#792) [Rick Harding] + - HACKING.rst: add clarifying note to LP CLA process section (#789) + - Stop linting cloud_tests (#791) + - cloud-tests: update cryptography requirement (#790) [Joshua Powers] + - Remove 'remove-raise-on-failure' calls from integration_tests (#788) + - Use more cloud defaults in integration tests (#757) + - Adding self to cla signers (#776) [Andrew Bogott] + - doc: avoid two warnings (#781) [Dan Kenigsberg] + - Use proper spelling for Red Hat (#778) [Dan Kenigsberg] + - Add antonyc to .github-cla-signers (#747) [Anton Chaporgin] + - integration_tests: log image serial if available (#772) + - [VMware] Support cloudinit raw data feature (#691) [xiaofengw-vmware] + - net: Fix static routes to host in eni renderer (#668) [Pavel Abalikhin] + - .travis.yml: don't run cloud_tests in CI (#756) + - test_upgrade: add some missing commas (#769) + - cc_seed_random: update documentation and fix integration test (#771) + (LP: #1911227) + - Fix test gh-632 test to only run on NoCloud (#770) (LP: #1911230) + - archlinux: fix package upgrade command handling (#768) [Bao Trinh] + - integration_tests: add integration test for LP: #1910835 (#761) + - Fix regression with handling of IMDS ssh keys (#760) [Thomas Stringer] + - integration_tests: log cloud-init version in SUT (#758) + - Add ajmyyra as contributor (#742) [Antti Myyrä] + - net_convert: add some missing help text (#755) + - Missing IPV6_AUTOCONF=no to render sysconfig dhcp6 stateful on RHEL + (#753) [Eduardo Otubo] + - doc: document missing IPv6 subnet types (#744) [Antti Myyrä] + - Add example configuration for datasource `AliYun` (#751) [Xiaoyu Zhong] + - integration_tests: add SSH key selection settings (#754) + - fix a typo in man page cloud-init.1 (#752) [Amy Chen] + - network-config-format-v2.rst: add Netplan Passthrough section (#750) + - stale: re-enable post holidays (#749) + - integration_tests: port ca_certs tests from cloud_tests (#732) + - Azure: Add telemetry for poll IMDS (#741) [Johnson Shi] + - doc: move testing section from HACKING to its own doc (#739) + - No longer allow integration test failures on travis (#738) + - stale: fix error in definition (#740) + - integration_tests: 
set log-cli-level to INFO by default (#737) + - PULL_REQUEST_TEMPLATE.md: use backticks around commit message (#736) + - stale: disable check for holiday break (#735) + - integration_tests: log the path we collect logs into (#733) + - .travis.yml: add (most) supported Python versions to CI (#734) + - integration_tests: fix IN_PLACE CLOUD_INIT_SOURCE (#731) + - cc_ca_certs: add RHEL support (#633) [cawamata] + - Azure: only generate config for NICs with addresses (#709) + [Thomas Stringer] + - doc: fix CloudStack configuration example (#707) [Olivier Lemasle] + - integration_tests: restrict test_lxd_bridge appropriately (#730) + - Add integration tests for CLI functionality (#729) + - Integration test for gh-626 (#728) + - Some test_upgrade fixes (#726) + - Ensure overriding test vars with env vars works for booleans (#727) + - integration_tests: port lxd_bridge test from cloud_tests (#718) + - Integration test for gh-632. (#725) + - Integration test for gh-671 (#724) + - integration-requirements.txt: bump pycloudlib commit (#723) + - Drop unnecessary shebang from cmd/main.py (#722) [Eduardo Otubo] + - Integration test for LP: #1813396 and #669 (#719) + - integration_tests: include timestamp in log output (#720) + - integration_tests: add test for LP: #1898997 (#713) + - Add integration test for power_state_change module (#717) + - Update documentation for network-config-format-v2 (#701) [ggiesen] + - sandbox CA Cert tests to not require ca-certificates (#715) + [Eduardo Otubo] + - Add upgrade integration test (#693) + - Integration test for 570 (#712) + - Add ability to keep snapshotted images in integration tests (#711) + - Integration test for pull #586 (#706) + - integration_tests: introduce skipping of tests by OS (#702) + - integration_tests: introduce IntegrationInstance.restart (#708) + - Add lxd-vm to list of valid integration test platforms (#705) + - Adding BOOTPROTO = dhcp to render sysconfig dhcp6 stateful on RHEL + (#685) [Eduardo Otubo] + - Delete image snapshots created for integration tests (#682) + - Parametrize ssh_keys_provided integration test (#700) [lucasmoura] + - Drop use_sudo attribute on IntegrationInstance (#694) [lucasmoura] + - cc_apt_configure: add riscv64 as a ports arch (#687) + [Dimitri John Ledkov] + - cla: add xnox (#692) [Dimitri John Ledkov] + - Collect logs from integration test runs (#675) + 20.4.1 - Revert "ssh_util: handle non-default AuthorizedKeysFile config (#586)" diff --git a/cloudinit/version.py b/cloudinit/version.py index 36ec728e..94afd60d 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "20.4.1" +__VERSION__ = "21.1" _PACKAGED_VERSION = '@@PACKAGED_VERSION@@' FEATURES = [ -- cgit v1.2.3 From 695c4f8f46585dd7feac2fdc0729f410c539d3bc Mon Sep 17 00:00:00 2001 From: Toshi Aoyama Date: Wed, 24 Feb 2021 01:09:23 +0900 Subject: Update cc_set_hostname documentation (#818) It is distro dependent whether hostname or fqdn is used --- cloudinit/config/cc_set_hostname.py | 4 ++-- tools/.github-cla-signers | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 1d23d80d..d4017478 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -18,8 +18,8 @@ A hostname and fqdn can be provided by specifying a full domain name under the ``fqdn`` key. 
Alternatively, a hostname can be specified using the ``hostname``
 key, and the fqdn of the cloud will be used. If a fqdn is specified with the
 ``hostname`` key, it will be handled properly, although it is better to use
-the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, ``fqdn``
-will be used.
+the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set,
+it is distro dependent whether ``hostname`` or ``fqdn`` is used.
 
 This module will run in the init-local stage before networking is configured
 if the hostname is set by metadata or user data on the local system.
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index 689d7902..5dfa2eb3 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -37,6 +37,7 @@ slyon
 smoser
 sshedi
 TheRealFalcon
+taoyama
 tnt-dev
 tomponline
 tsanghan
-- 
cgit v1.2.3


From 402d98edaa3a266bd5fab2b3a10d716346da6eb9 Mon Sep 17 00:00:00 2001
From: dermotbradley
Date: Wed, 24 Feb 2021 15:04:16 +0000
Subject: cc_keys_to_console.py: Add documentation for recently added config
 key (#824)

PR #811 added a new config key, emit_keys_to_console, but didn't update
the documentation to mention it.
---
 cloudinit/config/cc_keys_to_console.py | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index 646d1f67..d72b5244 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -9,14 +9,17 @@
 """
 Keys to Console
 ---------------
-**Summary:** control which SSH keys may be written to console
-
-For security reasons it may be desirable not to write SSH fingerprints and keys
-to the console. To avoid the fingerprint of types of SSH keys being written to
-console the ``ssh_fp_console_blacklist`` config key can be used. By default all
-types of keys will have their fingerprints written to console. To avoid keys
-of a key type being written to console the ``ssh_key_console_blacklist`` config
-key can be used. By default ``ssh-dss`` keys are not written to console.
+**Summary:** control which SSH host keys may be written to console
+
+For security reasons it may be desirable not to write SSH host keys and their
+fingerprints to the console. To avoid either being written to the console the
+``emit_keys_to_console`` config key under the main ``ssh`` config key can be
+used. To avoid the fingerprint of types of SSH host keys being written to
+console the ``ssh_fp_console_blacklist`` config key can be used. By default
+all types of keys will have their fingerprints written to console. To avoid
+host keys of a key type being written to console the
+``ssh_key_console_blacklist`` config key can be used. By default ``ssh-dss``
+host keys are not written to console.
 
 **Internal name:** ``cc_keys_to_console``
 
@@ -26,6 +29,9 @@ key can be used. By default ``ssh-dss`` keys are not written to console.
 
 **Config keys**::
 
+    ssh:
+      emit_keys_to_console: false
+
     ssh_fp_console_blacklist:
     ssh_key_console_blacklist:
 """
-- 
cgit v1.2.3


From 2757333e844f597b85980093bddc52552ef73aa5 Mon Sep 17 00:00:00 2001
From: Kristian Klausen
Date: Thu, 25 Feb 2021 17:12:17 +0100
Subject: archlinux: Use hostnamectl to set the transient hostname (#797)

hostname (inetutils) isn't installed by default on Arch, so switch to
hostnamectl, which is installed by default (systemd).
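The distro override reduces to a single call into systemd's tooling (a
condensed sketch of the patch below; the util.logexc error handling is
omitted here):

    # Set the transient (runtime-only) hostname via systemd
    subp.subp(['hostnamectl', '--transient', 'set-hostname', hostname])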
--- cloudinit/distros/arch.py | 11 +++++++++++ tools/.github-cla-signers | 1 + 2 files changed, 12 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 378a6daa..5f42a24c 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -137,6 +137,17 @@ class Distro(distros.Distro): return default return hostname + # hostname (inetutils) isn't installed per default on arch, so we use + # hostnamectl which is installed per default (systemd). + def _apply_hostname(self, hostname): + LOG.debug("Non-persistently setting the system hostname to %s", + hostname) + try: + subp.subp(['hostnamectl', '--transient', 'set-hostname', hostname]) + except subp.ProcessExecutionError: + util.logexc(LOG, "Failed to non-persistently adjust the system " + "hostname to %s", hostname) + def set_timezone(self, tz): distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 5dfa2eb3..aca0ee5e 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -20,6 +20,7 @@ johnsonshi jordimassaguerpla jqueuniet jsf9k +klausenbusk landon912 lucasmoura lungj -- cgit v1.2.3 From 3be666306023caf6f236464fb655741b6605bdf7 Mon Sep 17 00:00:00 2001 From: Thomas Stringer Date: Wed, 3 Mar 2021 11:07:43 -0500 Subject: Add flexibility to IMDS api-version (#793) Add flexibility to IMDS api-version by having both a desired IMDS api-version and a minimum api-version. The desired api-version will be used first, and if that fails it will fall back to the minimum api-version. --- cloudinit/sources/DataSourceAzure.py | 113 ++++++++++++++++++++------ tests/unittests/test_datasource/test_azure.py | 42 +++++++++- 2 files changed, 129 insertions(+), 26 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index cee630f7..6cae9e82 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -78,17 +78,15 @@ AGENT_SEED_DIR = '/var/lib/waagent' # In the event where the IMDS primary server is not # available, it takes 1s to fallback to the secondary one IMDS_TIMEOUT_IN_SECONDS = 2 -IMDS_URL = "http://169.254.169.254/metadata/" -IMDS_VER = "2019-06-01" -IMDS_VER_PARAM = "api-version={}".format(IMDS_VER) +IMDS_URL = "http://169.254.169.254/metadata" +IMDS_VER_MIN = "2019-06-01" +IMDS_VER_WANT = "2020-09-01" class metadata_type(Enum): - compute = "{}instance?{}".format(IMDS_URL, IMDS_VER_PARAM) - network = "{}instance/network?{}".format(IMDS_URL, - IMDS_VER_PARAM) - reprovisiondata = "{}reprovisiondata?{}".format(IMDS_URL, - IMDS_VER_PARAM) + compute = "{}/instance".format(IMDS_URL) + network = "{}/instance/network".format(IMDS_URL) + reprovisiondata = "{}/reprovisiondata".format(IMDS_URL) PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0" @@ -349,6 +347,8 @@ class DataSourceAzure(sources.DataSource): self.update_events['network'].add(EventType.BOOT) self._ephemeral_dhcp_ctx = None + self.failed_desired_api_version = False + def __str__(self): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) @@ -520,8 +520,10 @@ class DataSourceAzure(sources.DataSource): self._wait_for_all_nics_ready() ret = self._reprovision() - imds_md = get_metadata_from_imds( - self.fallback_interface, retries=10) + imds_md = self.get_imds_data_with_api_fallback( + self.fallback_interface, + retries=10 + ) (md, userdata_raw, cfg, files) = ret self.seed = cdev 
crawled_data.update({ @@ -652,6 +654,57 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700) return True + @azure_ds_telemetry_reporter + def get_imds_data_with_api_fallback( + self, + fallback_nic, + retries, + md_type=metadata_type.compute): + """ + Wrapper for get_metadata_from_imds so that we can have flexibility + in which IMDS api-version we use. If a particular instance of IMDS + does not have the api version that is desired, we want to make + this fault tolerant and fall back to a good known minimum api + version. + """ + + if not self.failed_desired_api_version: + for _ in range(retries): + try: + LOG.info( + "Attempting IMDS api-version: %s", + IMDS_VER_WANT + ) + return get_metadata_from_imds( + fallback_nic=fallback_nic, + retries=0, + md_type=md_type, + api_version=IMDS_VER_WANT + ) + except UrlError as err: + LOG.info( + "UrlError with IMDS api-version: %s", + IMDS_VER_WANT + ) + if err.code == 400: + log_msg = "Fall back to IMDS api-version: {}".format( + IMDS_VER_MIN + ) + report_diagnostic_event( + log_msg, + logger_func=LOG.info + ) + self.failed_desired_api_version = True + break + + LOG.info("Using IMDS api-version: %s", IMDS_VER_MIN) + return get_metadata_from_imds( + fallback_nic=fallback_nic, + retries=retries, + md_type=md_type, + api_version=IMDS_VER_MIN + ) + def device_name_to_device(self, name): return self.ds_cfg['disk_aliases'].get(name) @@ -880,10 +933,11 @@ class DataSourceAzure(sources.DataSource): # primary nic is being attached first helps here. Otherwise each nic # could add several seconds of delay. try: - imds_md = get_metadata_from_imds( + imds_md = self.get_imds_data_with_api_fallback( ifname, 5, - metadata_type.network) + metadata_type.network + ) except Exception as e: LOG.warning( "Failed to get network metadata using nic %s. Attempt to " @@ -1017,7 +1071,10 @@ class DataSourceAzure(sources.DataSource): def _poll_imds(self): """Poll IMDS for the new provisioning data until we get a valid response. Then return the returned JSON object.""" - url = metadata_type.reprovisiondata.value + url = "{}?api-version={}".format( + metadata_type.reprovisiondata.value, + IMDS_VER_MIN + ) headers = {"Metadata": "true"} nl_sock = None report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) @@ -2059,7 +2116,8 @@ def _generate_network_config_from_fallback_config() -> dict: @azure_ds_telemetry_reporter def get_metadata_from_imds(fallback_nic, retries, - md_type=metadata_type.compute): + md_type=metadata_type.compute, + api_version=IMDS_VER_MIN): """Query Azure's instance metadata service, returning a dictionary. If network is not up, setup ephemeral dhcp on fallback_nic to talk to the @@ -2069,13 +2127,16 @@ def get_metadata_from_imds(fallback_nic, @param fallback_nic: String. The name of the nic which requires active network in order to query IMDS. @param retries: The number of retries of the IMDS_URL. + @param md_type: Metadata type for IMDS request. + @param api_version: IMDS api-version to use in the request. @return: A dict of instance metadata containing compute and network info. 
""" kwargs = {'logfunc': LOG.debug, 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)', - 'func': _get_metadata_from_imds, 'args': (retries, md_type,)} + 'func': _get_metadata_from_imds, + 'args': (retries, md_type, api_version,)} if net.is_up(fallback_nic): return util.log_time(**kwargs) else: @@ -2091,20 +2152,26 @@ def get_metadata_from_imds(fallback_nic, @azure_ds_telemetry_reporter -def _get_metadata_from_imds(retries, md_type=metadata_type.compute): - - url = md_type.value +def _get_metadata_from_imds( + retries, + md_type=metadata_type.compute, + api_version=IMDS_VER_MIN): + url = "{}?api-version={}".format(md_type.value, api_version) headers = {"Metadata": "true"} try: response = readurl( url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, retries=retries, exception_cb=retry_on_url_exc) except Exception as e: - report_diagnostic_event( - 'Ignoring IMDS instance metadata. ' - 'Get metadata from IMDS failed: %s' % e, - logger_func=LOG.warning) - return {} + # pylint:disable=no-member + if isinstance(e, UrlError) and e.code == 400: + raise + else: + report_diagnostic_event( + 'Ignoring IMDS instance metadata. ' + 'Get metadata from IMDS failed: %s' % e, + logger_func=LOG.warning) + return {} try: from json.decoder import JSONDecodeError json_decode_error = JSONDecodeError diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index f597c723..dedebeb1 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -408,7 +408,9 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): def setUp(self): super(TestGetMetadataFromIMDS, self).setUp() - self.network_md_url = dsaz.IMDS_URL + "instance?api-version=2019-06-01" + self.network_md_url = "{}/instance?api-version=2019-06-01".format( + dsaz.IMDS_URL + ) @mock.patch(MOCKPATH + 'readurl') @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True) @@ -518,7 +520,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): """Return empty dict when IMDS network metadata is absent.""" httpretty.register_uri( httpretty.GET, - dsaz.IMDS_URL + 'instance?api-version=2017-12-01', + dsaz.IMDS_URL + '/instance?api-version=2017-12-01', body={}, status=404) m_net_is_up.return_value = True # skips dhcp @@ -1877,6 +1879,40 @@ scbus-1 on xpt0 bus 0 ssh_keys = dsrc.get_public_ssh_keys() self.assertEqual(ssh_keys, ['key2']) + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_imds_api_version_wanted_nonexistent( + self, + m_get_metadata_from_imds): + def get_metadata_from_imds_side_eff(*args, **kwargs): + if kwargs['api_version'] == dsaz.IMDS_VER_WANT: + raise url_helper.UrlError("No IMDS version", code=400) + return NETWORK_METADATA + m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertIsNotNone(dsrc.metadata) + self.assertTrue(dsrc.failed_desired_api_version) + + @mock.patch( + MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA) + def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + 
dsrc.get_data()
+        self.assertIsNotNone(dsrc.metadata)
+        self.assertFalse(dsrc.failed_desired_api_version)
+
 
 class TestAzureBounce(CiTestCase):
 
@@ -2657,7 +2693,7 @@ class TestPreprovisioningHotAttachNics(CiTestCase):
     @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
     @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
     @mock.patch('cloudinit.sources.net.find_fallback_nic')
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
+    @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
     @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
     @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
     @mock.patch('os.path.isfile')
-- 
cgit v1.2.3


From 121bc04cdf0e6732fe143b7419131dc250c13384 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Mon, 8 Mar 2021 12:50:57 -0500
Subject: net: exclude OVS internal interfaces in get_interfaces (#829)

`get_interfaces` is used in two ways, broadly: firstly, to determine
the available interfaces when converting cloud network configuration
formats to cloud-init's network configuration formats; and, secondly,
to ensure that any interfaces which are specified in network
configuration are (a) available, and (b) named correctly.

The first of these is unaffected by this commit, as no clouds support
Open vSwitch configuration in their network configuration formats.

For the second, we check that MAC addresses of physical devices are
unique. In some OVS configurations, there are OVS-created devices which
have duplicate MAC addresses, either with each other or with physical
devices. As these interfaces are created by OVS, we can be confident
that (a) they will be available when appropriate, and (b) that OVS will
name them correctly. As such, this commit excludes any OVS-internal
interfaces from the set of interfaces returned by `get_interfaces`.

LP: #1912844
---
 cloudinit/net/__init__.py                          |  62 +++++++++++
 cloudinit/net/tests/test_init.py                   | 119 +++++++++++++++++++++
 cloudinit/sources/helpers/tests/test_openstack.py  |   5 +
 cloudinit/sources/tests/test_oracle.py             |   4 +
 tests/integration_tests/bugs/test_lp1912844.py     | 103 ++++++++++++++++++
 .../unittests/test_datasource/test_configdrive.py  |   8 ++
 tests/unittests/test_net.py                        |  20 ++++
 7 files changed, 321 insertions(+)
 create mode 100644 tests/integration_tests/bugs/test_lp1912844.py

(limited to 'cloudinit')

diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index de65e7af..385b7bcc 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -6,6 +6,7 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import errno
import errno +import functools import ipaddress import logging import os @@ -19,6 +20,19 @@ from cloudinit.url_helper import UrlError, readurl LOG = logging.getLogger(__name__) SYS_CLASS_NET = "/sys/class/net/" DEFAULT_PRIMARY_INTERFACE = 'eth0' +OVS_INTERNAL_INTERFACE_LOOKUP_CMD = [ + "ovs-vsctl", + "--format", + "csv", + "--no-headings", + "--timeout", + "10", + "--columns", + "name", + "find", + "interface", + "type=internal", +] def natural_sort_key(s, _nsre=re.compile('([0-9]+)')): @@ -133,6 +147,52 @@ def master_is_openvswitch(devname): return os.path.exists(ovs_path) +@functools.lru_cache(maxsize=None) +def openvswitch_is_installed() -> bool: + """Return a bool indicating if Open vSwitch is installed in the system.""" + ret = bool(subp.which("ovs-vsctl")) + if not ret: + LOG.debug( + "ovs-vsctl not in PATH; not detecting Open vSwitch interfaces" + ) + return ret + + +@functools.lru_cache(maxsize=None) +def get_ovs_internal_interfaces() -> list: + """Return a list of the names of OVS internal interfaces on the system. + + These will all be strings, and are used to exclude OVS-specific interface + from cloud-init's network configuration handling. + """ + try: + out, _err = subp.subp(OVS_INTERNAL_INTERFACE_LOOKUP_CMD) + except subp.ProcessExecutionError as exc: + if "database connection failed" in exc.stderr: + LOG.info( + "Open vSwitch is not yet up; no interfaces will be detected as" + " OVS-internal" + ) + return [] + raise + else: + return out.splitlines() + + +def is_openvswitch_internal_interface(devname: str) -> bool: + """Returns True if this is an OVS internal interface. + + If OVS is not installed or not yet running, this will return False. + """ + if not openvswitch_is_installed(): + return False + ovs_bridges = get_ovs_internal_interfaces() + if devname in ovs_bridges: + LOG.debug("Detected %s as an OVS interface", devname) + return True + return False + + def is_netfailover(devname, driver=None): """ netfailover driver uses 3 nics, master, primary and standby. this returns True if the device is either the primary or standby @@ -884,6 +944,8 @@ def get_interfaces(blacklist_drivers=None) -> list: # skip nics that have no mac (00:00....) if name != 'lo' and mac == zero_mac[:len(mac)]: continue + if is_openvswitch_internal_interface(name): + continue # skip nics that have drivers blacklisted driver = device_driver(name) if driver in blacklist_drivers: diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 0535387a..946f8ee2 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -391,6 +391,10 @@ class TestGetDeviceList(CiTestCase): self.assertCountEqual(['eth0', 'eth1'], net.get_devicelist()) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False), +) class TestGetInterfaceMAC(CiTestCase): def setUp(self): @@ -1224,6 +1228,121 @@ class TestNetFailOver(CiTestCase): self.assertFalse(net.is_netfailover(devname, driver)) +class TestOpenvswitchIsInstalled: + """Test cloudinit.net.openvswitch_is_installed. + + Uses the ``clear_lru_cache`` local autouse fixture to allow us to test + despite the ``lru_cache`` decorator on the unit under test. 
+ """ + + @pytest.fixture(autouse=True) + def clear_lru_cache(self): + net.openvswitch_is_installed.cache_clear() + + @pytest.mark.parametrize( + "expected,which_return", [(True, "/some/path"), (False, None)] + ) + @mock.patch("cloudinit.net.subp.which") + def test_mirrors_which_result(self, m_which, expected, which_return): + m_which.return_value = which_return + assert expected == net.openvswitch_is_installed() + + @mock.patch("cloudinit.net.subp.which") + def test_only_calls_which_once(self, m_which): + net.openvswitch_is_installed() + net.openvswitch_is_installed() + assert 1 == m_which.call_count + + +@mock.patch("cloudinit.net.subp.subp", return_value=("", "")) +class TestGetOVSInternalInterfaces: + """Test cloudinit.net.get_ovs_internal_interfaces. + + Uses the ``clear_lru_cache`` local autouse fixture to allow us to test + despite the ``lru_cache`` decorator on the unit under test. + """ + @pytest.fixture(autouse=True) + def clear_lru_cache(self): + net.get_ovs_internal_interfaces.cache_clear() + + def test_command_used(self, m_subp): + """Test we use the correct command when we call subp""" + net.get_ovs_internal_interfaces() + + assert [ + mock.call(net.OVS_INTERNAL_INTERFACE_LOOKUP_CMD) + ] == m_subp.call_args_list + + def test_subp_contents_split_and_returned(self, m_subp): + """Test that the command output is appropriately mangled.""" + stdout = "iface1\niface2\niface3\n" + m_subp.return_value = (stdout, "") + + assert [ + "iface1", + "iface2", + "iface3", + ] == net.get_ovs_internal_interfaces() + + def test_database_connection_error_handled_gracefully(self, m_subp): + """Test that the error indicating OVS is down is handled gracefully.""" + m_subp.side_effect = ProcessExecutionError( + stderr="database connection failed" + ) + + assert [] == net.get_ovs_internal_interfaces() + + def test_other_errors_raised(self, m_subp): + """Test that only database connection errors are handled.""" + m_subp.side_effect = ProcessExecutionError() + + with pytest.raises(ProcessExecutionError): + net.get_ovs_internal_interfaces() + + def test_only_runs_once(self, m_subp): + """Test that we cache the value.""" + net.get_ovs_internal_interfaces() + net.get_ovs_internal_interfaces() + + assert 1 == m_subp.call_count + + +@mock.patch("cloudinit.net.get_ovs_internal_interfaces") +@mock.patch("cloudinit.net.openvswitch_is_installed") +class TestIsOpenVSwitchInternalInterface: + def test_false_if_ovs_not_installed( + self, m_openvswitch_is_installed, _m_get_ovs_internal_interfaces + ): + """Test that OVS' absence returns False.""" + m_openvswitch_is_installed.return_value = False + + assert not net.is_openvswitch_internal_interface("devname") + + @pytest.mark.parametrize( + "detected_interfaces,devname,expected_return", + [ + ([], "devname", False), + (["notdevname"], "devname", False), + (["devname"], "devname", True), + (["some", "other", "devices", "and", "ours"], "ours", True), + ], + ) + def test_return_value_based_on_detected_interfaces( + self, + m_openvswitch_is_installed, + m_get_ovs_internal_interfaces, + detected_interfaces, + devname, + expected_return, + ): + """Test that the detected interfaces are used correctly.""" + m_openvswitch_is_installed.return_value = True + m_get_ovs_internal_interfaces.return_value = detected_interfaces + assert expected_return == net.is_openvswitch_internal_interface( + devname + ) + + class TestIsIpAddress: """Tests for net.is_ip_address. 
diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py index 2bde1e3f..95fb9743 100644 --- a/cloudinit/sources/helpers/tests/test_openstack.py +++ b/cloudinit/sources/helpers/tests/test_openstack.py @@ -1,10 +1,15 @@ # This file is part of cloud-init. See LICENSE file for license information. # ./cloudinit/sources/helpers/tests/test_openstack.py +from unittest import mock from cloudinit.sources.helpers import openstack from cloudinit.tests import helpers as test_helpers +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestConvertNetJson(test_helpers.CiTestCase): def test_phy_types(self): diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index a7bbdfd9..dcf33b9b 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -173,6 +173,10 @@ class TestIsPlatformViable(test_helpers.CiTestCase): m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')]) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestNetworkConfigFromOpcImds: def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds): oracle_ds._vnics_data = [{}] diff --git a/tests/integration_tests/bugs/test_lp1912844.py b/tests/integration_tests/bugs/test_lp1912844.py new file mode 100644 index 00000000..efafae50 --- /dev/null +++ b/tests/integration_tests/bugs/test_lp1912844.py @@ -0,0 +1,103 @@ +"""Integration test for LP: #1912844 + +cloud-init should ignore OVS-internal interfaces when performing its own +interface determination: these interfaces are handled fully by OVS, so +cloud-init should never need to touch them. + +This test is a semi-synthetic reproducer for the bug. It uses a similar +network configuration, tweaked slightly to DHCP in a way that will succeed even +on "failed" boots. The exact bug doesn't reproduce with the NoCloud +datasource, because it runs at init-local time (whereas the MAAS datasource, +from the report, runs only at init (network) time): this means that the +networking code runs before OVS creates its interfaces (which happens after +init-local but, of course, before networking is up), and so doesn't generate +the traceback that they cause. We work around this by calling +``get_interfaces_by_mac` directly in the test code. +""" +import pytest + +from tests.integration_tests import random_mac_address + +MAC_ADDRESS = random_mac_address() + +NETWORK_CONFIG = """\ +bonds: + bond0: + interfaces: + - enp5s0 + macaddress: {0} + mtu: 1500 +bridges: + ovs-br: + interfaces: + - bond0 + macaddress: {0} + mtu: 1500 + openvswitch: {{}} + dhcp4: true +ethernets: + enp5s0: + mtu: 1500 + set-name: enp5s0 + match: + macaddress: {0} +version: 2 +vlans: + ovs-br.100: + id: 100 + link: ovs-br + mtu: 1500 + ovs-br.200: + id: 200 + link: ovs-br + mtu: 1500 +""".format(MAC_ADDRESS) + + +SETUP_USER_DATA = """\ +#cloud-config +packages: +- openvswitch-switch +""" + + +@pytest.fixture +def ovs_enabled_session_cloud(session_cloud): + """A session_cloud wrapper, to use an OVS-enabled image for tests. + + This implementation is complicated by wanting to use ``session_cloud``s + snapshot cleanup/retention logic, to avoid having to reimplement that here. 
+ """ + old_snapshot_id = session_cloud.snapshot_id + with session_cloud.launch( + user_data=SETUP_USER_DATA, + ) as instance: + instance.instance.clean() + session_cloud.snapshot_id = instance.snapshot() + + yield session_cloud + + try: + session_cloud.delete_snapshot() + finally: + session_cloud.snapshot_id = old_snapshot_id + + +@pytest.mark.lxd_vm +def test_get_interfaces_by_mac_doesnt_traceback(ovs_enabled_session_cloud): + """Launch our OVS-enabled image and confirm the bug doesn't reproduce.""" + launch_kwargs = { + "config_dict": { + "user.network-config": NETWORK_CONFIG, + "volatile.eth0.hwaddr": MAC_ADDRESS, + }, + } + with ovs_enabled_session_cloud.launch( + launch_kwargs=launch_kwargs, + ) as client: + result = client.execute( + "python3 -c" + "'from cloudinit.net import get_interfaces_by_mac;" + "get_interfaces_by_mac()'" + ) + assert result.ok diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 6f830cc6..2e2b7847 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -494,6 +494,10 @@ class TestConfigDriveDataSource(CiTestCase): self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestNetJson(CiTestCase): def setUp(self): super(TestNetJson, self).setUp() @@ -654,6 +658,10 @@ class TestNetJson(CiTestCase): self.assertEqual(out_data, conv_data) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestConvertNetworkData(CiTestCase): with_logs = True diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 38d934d4..cb636f41 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -2933,6 +2933,10 @@ iface eth1 inet dhcp self.assertEqual(0, mock_settle.call_count) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestRhelSysConfigRendering(CiTestCase): with_logs = True @@ -3620,6 +3624,10 @@ USERCTL=no expected, self._render_and_read(network_config=v2data)) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestOpenSuseSysConfigRendering(CiTestCase): with_logs = True @@ -5037,6 +5045,10 @@ class TestNetRenderers(CiTestCase): self.assertTrue(result) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestGetInterfaces(CiTestCase): _data = {'bonds': ['bond1'], 'bridges': ['bridge1'], @@ -5186,6 +5198,10 @@ class TestInterfaceHasOwnMac(CiTestCase): self.assertFalse(interface_has_own_mac("eth0")) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestGetInterfacesByMac(CiTestCase): _data = {'bonds': ['bond1'], 'bridges': ['bridge1'], @@ -5342,6 +5358,10 @@ class TestInterfacesSorting(CiTestCase): ['enp0s3', 'enp0s8', 'enp0s13', 'enp1s2', 'enp2s0', 'enp2s3']) +@mock.patch( + "cloudinit.net.is_openvswitch_internal_interface", + mock.Mock(return_value=False) +) class TestGetIBHwaddrsByInterface(CiTestCase): _ib_addr = '80:00:00:28:fe:80:00:00:00:00:00:00:00:11:22:03:00:33:44:56' -- cgit v1.2.3 From 9bd19645a61586b82e86db6f518dd05c3363b17f Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 8 Mar 2021 14:09:47 -0600 Subject: Fix requiring device-number on EC2 derivatives (#836) #342 (70dbccbb) introduced 
the ability to determine route-metrics based on the `device-number` provided by the EC2 IMDS. Not all datasources that subclass EC2 will have this attribute, so allow the old behavior if `device-number` is not present. LP: #1917875 --- cloudinit/sources/DataSourceEc2.py | 3 ++- tests/unittests/test_datasource/test_aliyun.py | 30 ++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 1930a509..a2105dc7 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -765,13 +765,14 @@ def convert_ec2_metadata_network_config( netcfg['ethernets'][nic_name] = dev_config return netcfg # Apply network config for all nics and any secondary IPv4/v6 addresses + nic_idx = 0 for mac, nic_name in sorted(macs_to_nics.items()): nic_metadata = macs_metadata.get(mac) if not nic_metadata: continue # Not a physical nic represented in metadata # device-number is zero-indexed, we want it 1-indexed for the # multiplication on the following line - nic_idx = int(nic_metadata['device-number']) + 1 + nic_idx = int(nic_metadata.get('device-number', nic_idx)) + 1 dhcp_override = {'route-metric': nic_idx * 100} dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override, 'dhcp6': False, diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py index eb2828d5..cab1ac2b 100644 --- a/tests/unittests/test_datasource/test_aliyun.py +++ b/tests/unittests/test_datasource/test_aliyun.py @@ -7,6 +7,7 @@ from unittest import mock from cloudinit import helpers from cloudinit.sources import DataSourceAliYun as ay +from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config from cloudinit.tests import helpers as test_helpers DEFAULT_METADATA = { @@ -183,6 +184,35 @@ class TestAliYunDatasource(test_helpers.HttprettyTestCase): self.assertEqual(ay.parse_public_keys(public_keys), public_keys['key-pair-0']['openssh-key']) + def test_route_metric_calculated_without_device_number(self): + """Test that route-metric code works without `device-number` + + `device-number` is part of EC2 metadata, but not supported on aliyun. + Attempting to access it will raise a KeyError. + + LP: #1917875 + """ + netcfg = convert_ec2_metadata_network_config( + {"interfaces": {"macs": { + "06:17:04:d7:26:09": { + "interface-id": "eni-e44ef49e", + }, + "06:17:04:d7:26:08": { + "interface-id": "eni-e44ef49f", + } + }}}, + macs_to_nics={ + '06:17:04:d7:26:09': 'eth0', + '06:17:04:d7:26:08': 'eth1', + } + ) + + met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric'] + met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric'] + + # route-metric numbers should be 100 apart + assert 100 == abs(met0 - met1) + class TestIsAliYun(test_helpers.CiTestCase): ALIYUN_PRODUCT = 'Alibaba Cloud ECS' -- cgit v1.2.3 From 3aeb14cd46613b97afefc4632909f6e9b83d0230 Mon Sep 17 00:00:00 2001 From: Kristian Klausen Date: Mon, 15 Mar 2021 20:30:03 +0100 Subject: archlinux: Fix broken locale logic (#841) The locale wasn't persisted correctly, nor set. 
LP: #1402406 --- cloudinit/distros/arch.py | 18 ++++++++++------- .../unittests/test_handler/test_handler_locale.py | 23 ++++++++++++++++++++++ 2 files changed, 34 insertions(+), 7 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 5f42a24c..f8385f7f 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__) class Distro(distros.Distro): - locale_conf_fn = "/etc/locale.gen" + locale_gen_fn = "/etc/locale.gen" network_conf_dir = "/etc/netctl" resolve_conf_fn = "/etc/resolv.conf" init_cmd = ['systemctl'] # init scripts @@ -43,16 +43,20 @@ class Distro(distros.Distro): cfg['ssh_svcname'] = 'sshd' def apply_locale(self, locale, out_fn=None): - if not out_fn: - out_fn = self.locale_conf_fn - subp.subp(['locale-gen', '-G', locale], capture=False) - # "" provides trailing newline during join + if out_fn is not None and out_fn != "/etc/locale.conf": + LOG.warning("Invalid locale_configfile %s, only supported " + "value is /etc/locale.conf", out_fn) lines = [ util.make_header(), - 'LANG="%s"' % (locale), + # Hard-coding the charset isn't ideal, but there is no other way. + '%s UTF-8' % (locale), "", ] - util.write_file(out_fn, "\n".join(lines)) + util.write_file(self.locale_gen_fn, "\n".join(lines)) + subp.subp(['locale-gen'], capture=False) + # In the future systemd can handle locale-gen stuff: + # https://github.com/systemd/systemd/pull/9864 + subp.subp(['localectl', 'set-locale', locale], capture=False) def install_packages(self, pkglist): self.update_package_sources() diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py index 47e7d804..15fe7b23 100644 --- a/tests/unittests/test_handler/test_handler_locale.py +++ b/tests/unittests/test_handler/test_handler_locale.py @@ -44,6 +44,29 @@ class TestLocale(t_help.FilesystemMockingTestCase): cc = cloud.Cloud(ds, paths, {}, d, None) return cc + def test_set_locale_arch(self): + locale = 'en_GB.UTF-8' + locale_configfile = '/etc/invalid-locale-path' + cfg = { + 'locale': locale, + 'locale_configfile': locale_configfile, + } + cc = self._get_cloud('arch') + + with mock.patch('cloudinit.distros.arch.subp.subp') as m_subp: + with mock.patch('cloudinit.distros.arch.LOG.warning') as m_LOG: + cc_locale.handle('cc_locale', cfg, cc, LOG, []) + m_LOG.assert_called_with('Invalid locale_configfile %s, ' + 'only supported value is ' + '/etc/locale.conf', + locale_configfile) + + contents = util.load_file(cc.distro.locale_gen_fn) + self.assertIn('%s UTF-8' % locale, contents) + m_subp.assert_called_with(['localectl', + 'set-locale', + locale], capture=False) + def test_set_locale_sles(self): cfg = { -- cgit v1.2.3 From f35181fa970453ba6c7c14575b12185533391b97 Mon Sep 17 00:00:00 2001 From: eb3095 <45504889+eb3095@users.noreply.github.com> Date: Tue, 16 Mar 2021 12:35:05 -0400 Subject: Fix stack trace if vendordata_raw contained an array (#837) The implementation in existing datasources means that vendordata_raw is not "raw" as it ideally would be. Instead, actual values may include bytes, string or list. If the value was a list, then the attempt to persist that data to a file in '_store_rawdata' would raise a TypeError. The change is to encode with util.json_dumps (which is safe for binary data) before writing. 
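A minimal sketch of the failure mode and the fix, using only the cloudinit.util helpers this diff relies on (the sample vendordata value and target path are illustrative, not taken from a real datasource):

    from cloudinit import util

    vendordata_raw = [{"type": "dhcp"}]       # a list, as some datasources return
    # util.write_file(path, vendordata_raw)  # previously: TypeError on a list
    data = util.json_dumps(vendordata_raw)    # safe for bytes/str/list values
    util.write_file("/run/vendor-data.txt", data, 0o600)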
--- cloudinit/stages.py | 18 ++++++++++++++---- tools/.github-cla-signers | 1 + 2 files changed, 15 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 3ef4491c..5bacc85d 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -364,12 +364,12 @@ class Init(object): 'userdata') self._store_processeddata(self.datasource.get_userdata(), 'userdata') - self._store_rawdata(self.datasource.get_vendordata_raw(), - 'vendordata') + self._store_raw_vendordata(self.datasource.get_vendordata_raw(), + 'vendordata') self._store_processeddata(self.datasource.get_vendordata(), 'vendordata') - self._store_rawdata(self.datasource.get_vendordata2_raw(), - 'vendordata2') + self._store_raw_vendordata(self.datasource.get_vendordata2_raw(), + 'vendordata2') self._store_processeddata(self.datasource.get_vendordata2(), 'vendordata2') @@ -397,6 +397,16 @@ class Init(object): data = b'' util.write_file(self._get_ipath('%s_raw' % datasource), data, 0o600) + def _store_raw_vendordata(self, data, datasource): + # Only these data types + if data is not None and type(data) not in [bytes, str, list]: + raise TypeError("vendordata_raw is unsupported type '%s'" % + str(type(data))) + # This data may be a list, convert it to a string if so + if isinstance(data, list): + data = util.json_dumps(data) + self._store_rawdata(data, datasource) + def _store_processeddata(self, processed_data, datasource): # processed is a Mime message, so write as string. if processed_data is None: diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index aca0ee5e..5c57acac 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -14,6 +14,7 @@ dankenigsberg dermotbradley dhensby eandersson +eb3095 emmanuelthome izzyleung johnsonshi -- cgit v1.2.3 From b794d426b9ab43ea9d6371477466070d86e10668 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 19 Mar 2021 10:06:42 -0400 Subject: write passwords only to serial console, lock down cloud-init-output.log (#847) Prior to this commit, when a user specified configuration which would generate random passwords for users, cloud-init would cause those passwords to be written to the serial console by emitting them on stderr. In the default configuration, any stdout or stderr emitted by cloud-init is also written to `/var/log/cloud-init-output.log`. This file is world-readable, meaning that those randomly-generated passwords were available to be read by any user with access to the system. This presents an obvious security issue. This commit responds to this issue in two ways: * We address the direct issue by moving from writing the passwords to sys.stderr to writing them directly to /dev/console (via util.multi_log); this means that the passwords will never end up in cloud-init-output.log * To avoid future issues like this, we also modify the logging code so that any files created in a log sink subprocess will only be owner/group readable and, if it exists, will be owned by the adm group. This results in `/var/log/cloud-init-output.log` no longer being world-readable, meaning that if there are other parts of the codebase that are emitting sensitive data intended for the serial console, that data is no longer available to all users of the system. 
LP: #1918303 --- cloudinit/config/cc_set_passwords.py | 5 +- cloudinit/config/tests/test_set_passwords.py | 40 ++++++++++++---- cloudinit/tests/test_util.py | 56 ++++++++++++++++++++++ cloudinit/util.py | 38 +++++++++++++-- .../integration_tests/modules/test_set_password.py | 24 ++++++++++ tests/integration_tests/test_logging.py | 22 +++++++++ tests/unittests/test_util.py | 4 ++ 7 files changed, 173 insertions(+), 16 deletions(-) create mode 100644 tests/integration_tests/test_logging.py (limited to 'cloudinit') diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index d6b5682d..433de751 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -78,7 +78,6 @@ password. """ import re -import sys from cloudinit.distros import ug_util from cloudinit import log as logging @@ -214,7 +213,9 @@ def handle(_name, cfg, cloud, log, args): if len(randlist): blurb = ("Set the following 'random' passwords\n", '\n'.join(randlist)) - sys.stderr.write("%s\n%s\n" % blurb) + util.multi_log( + "%s\n%s\n" % blurb, stderr=False, fallback_to_stdout=False + ) if expire: expired_users = [] diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index daa1ef51..bbe2ee8f 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -74,10 +74,6 @@ class TestSetPasswordsHandle(CiTestCase): with_logs = True - def setUp(self): - super(TestSetPasswordsHandle, self).setUp() - self.add_patch('cloudinit.config.cc_set_passwords.sys.stderr', 'm_err') - def test_handle_on_empty_config(self, *args): """handle logs that no password has changed when config is empty.""" cloud = self.tmp_cloud(distro='ubuntu') @@ -129,10 +125,12 @@ class TestSetPasswordsHandle(CiTestCase): mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])], m_subp.call_args_list) + @mock.patch(MODPATH + "util.multi_log") @mock.patch(MODPATH + "util.is_BSD") @mock.patch(MODPATH + "subp.subp") - def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp, - m_is_bsd): + def test_handle_on_chpasswd_list_creates_random_passwords( + self, m_subp, m_is_bsd, m_multi_log + ): """handle parses command set random passwords.""" m_is_bsd.return_value = False cloud = self.tmp_cloud(distro='ubuntu') @@ -146,10 +144,32 @@ class TestSetPasswordsHandle(CiTestCase): self.assertIn( 'DEBUG: Handling input for chpasswd as list.', self.logs.getvalue()) - self.assertNotEqual( - [mock.call(['chpasswd'], - '\n'.join(valid_random_pwds) + '\n')], - m_subp.call_args_list) + + self.assertEqual(1, m_subp.call_count) + args, _kwargs = m_subp.call_args + self.assertEqual(["chpasswd"], args[0]) + + stdin = args[1] + user_pass = { + user: password + for user, password + in (line.split(":") for line in stdin.splitlines()) + } + + self.assertEqual(1, m_multi_log.call_count) + self.assertEqual( + mock.call(mock.ANY, stderr=False, fallback_to_stdout=False), + m_multi_log.call_args + ) + + self.assertEqual(set(["root", "ubuntu"]), set(user_pass.keys())) + written_lines = m_multi_log.call_args[0][0].splitlines() + for password in user_pass.values(): + for line in written_lines: + if password in line: + break + else: + self.fail("Password not emitted to console") # vi: ts=4 expandtab diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index b7a302f1..e811917e 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -851,4 +851,60 @@ class TestEnsureFile: assert "ab" == 
kwargs["omode"] +@mock.patch("cloudinit.util.grp.getgrnam") +@mock.patch("cloudinit.util.os.setgid") +@mock.patch("cloudinit.util.os.umask") +class TestRedirectOutputPreexecFn: + """This tests specifically the preexec_fn used in redirect_output.""" + + @pytest.fixture(params=["outfmt", "errfmt"]) + def preexec_fn(self, request): + """A fixture to gather the preexec_fn used by redirect_output. + + This enables simpler direct testing of it, and parameterises any tests + using it to cover both the stdout and stderr code paths. + """ + test_string = "| piped output to invoke subprocess" + if request.param == "outfmt": + args = (test_string, None) + elif request.param == "errfmt": + args = (None, test_string) + with mock.patch("cloudinit.util.subprocess.Popen") as m_popen: + util.redirect_output(*args) + + assert 1 == m_popen.call_count + _args, kwargs = m_popen.call_args + assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen" + return kwargs["preexec_fn"] + + def test_preexec_fn_sets_umask( + self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn + ): + """preexec_fn should set a mask that avoids world-readable files.""" + preexec_fn() + + assert [mock.call(0o037)] == m_os_umask.call_args_list + + def test_preexec_fn_sets_group_id_if_adm_group_present( + self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn + ): + """We should setgrp to adm if present, so files are owned by them.""" + fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid) + m_getgrnam.return_value = fake_group + + preexec_fn() + + assert [mock.call("adm")] == m_getgrnam.call_args_list + assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list + + def test_preexec_fn_handles_absent_adm_group_gracefully( + self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn + ): + """We should handle an absent adm group gracefully.""" + m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'") + + preexec_fn() + + assert 0 == m_setgid.call_count + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 769f3425..4e0a72db 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -359,7 +359,7 @@ def find_modules(root_dir): def multi_log(text, console=True, stderr=True, - log=None, log_level=logging.DEBUG): + log=None, log_level=logging.DEBUG, fallback_to_stdout=True): if stderr: sys.stderr.write(text) if console: @@ -368,7 +368,7 @@ def multi_log(text, console=True, stderr=True, with open(conpath, 'w') as wfh: wfh.write(text) wfh.flush() - else: + elif fallback_to_stdout: # A container may lack /dev/console (arguably a container bug). If # it does not exist, then write output to stdout. this will result # in duplicate stderr and stdout messages if stderr was True. @@ -623,6 +623,26 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): if not o_err: o_err = sys.stderr + # pylint: disable=subprocess-popen-preexec-fn + def set_subprocess_umask_and_gid(): + """Reconfigure umask and group ID to create output files securely. + + This is passed to subprocess.Popen as preexec_fn, so it is executed in + the context of the newly-created process. 
It: + + * sets the umask of the process so created files aren't world-readable + * if an adm group exists in the system, sets that as the process' GID + (so that the created file(s) are owned by root:adm) + """ + os.umask(0o037) + try: + group_id = grp.getgrnam("adm").gr_gid + except KeyError: + # No adm group, don't set a group + pass + else: + os.setgid(group_id) + if outfmt: LOG.debug("Redirecting %s to %s", o_out, outfmt) (mode, arg) = outfmt.split(" ", 1) @@ -632,7 +652,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): owith = "wb" new_fp = open(arg, owith) elif mode == "|": - proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) + proc = subprocess.Popen( + arg, + shell=True, + stdin=subprocess.PIPE, + preexec_fn=set_subprocess_umask_and_gid, + ) new_fp = proc.stdin else: raise TypeError("Invalid type for output format: %s" % outfmt) @@ -654,7 +679,12 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): owith = "wb" new_fp = open(arg, owith) elif mode == "|": - proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE) + proc = subprocess.Popen( + arg, + shell=True, + stdin=subprocess.PIPE, + preexec_fn=set_subprocess_umask_and_gid, + ) new_fp = proc.stdin else: raise TypeError("Invalid type for error format: %s" % errfmt) diff --git a/tests/integration_tests/modules/test_set_password.py b/tests/integration_tests/modules/test_set_password.py index b13f76fb..d7cf91a5 100644 --- a/tests/integration_tests/modules/test_set_password.py +++ b/tests/integration_tests/modules/test_set_password.py @@ -116,6 +116,30 @@ class Mixin: # Which are not the same assert shadow_users["harry"] != shadow_users["dick"] + def test_random_passwords_not_stored_in_cloud_init_output_log( + self, class_client + ): + """We should not emit passwords to the in-instance log file. + + LP: #1918303 + """ + cloud_init_output = class_client.read_from_file( + "/var/log/cloud-init-output.log" + ) + assert "dick:" not in cloud_init_output + assert "harry:" not in cloud_init_output + + def test_random_passwords_emitted_to_serial_console(self, class_client): + """We should emit passwords to the serial console. (LP: #1918303)""" + try: + console_log = class_client.instance.console_log() + except NotImplementedError: + # Assume that an exception here means that we can't use the console + # log + pytest.skip("NotImplementedError when requesting console log") + assert "dick:" in console_log + assert "harry:" in console_log + def test_explicit_password_set_correctly(self, class_client): """Test that an explicitly-specified password is set correctly.""" shadow_users, _ = self._fetch_and_parse_etc_shadow(class_client) diff --git a/tests/integration_tests/test_logging.py b/tests/integration_tests/test_logging.py new file mode 100644 index 00000000..b31a0434 --- /dev/null +++ b/tests/integration_tests/test_logging.py @@ -0,0 +1,22 @@ +"""Integration tests relating to cloud-init's logging.""" + + +class TestVarLogCloudInitOutput: + """Integration tests relating to /var/log/cloud-init-output.log.""" + + def test_var_log_cloud_init_output_not_world_readable(self, client): + """ + The log can contain sensitive data, it shouldn't be world-readable. 
+ + LP: #1918303 + """ + # Check the file exists + assert client.execute("test -f /var/log/cloud-init-output.log").ok + + # Check its permissions are as we expect + perms, user, group = client.execute( + "stat -c %a:%U:%G /var/log/cloud-init-output.log" + ).split(":") + assert "640" == perms + assert "root" == user + assert "adm" == group diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 857629f1..e5292001 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -572,6 +572,10 @@ class TestMultiLog(helpers.FilesystemMockingTestCase): util.multi_log(logged_string) self.assertEqual(logged_string, self.stdout.getvalue()) + def test_logs_dont_go_to_stdout_if_fallback_to_stdout_is_false(self): + util.multi_log('something', fallback_to_stdout=False) + self.assertEqual('', self.stdout.getvalue()) + def test_logs_go_to_log_if_given(self): log = mock.MagicMock() logged_string = 'something very important' -- cgit v1.2.3 From 6ae1145f5e980a47ac2b1ff3afa228a5da3f6e70 Mon Sep 17 00:00:00 2001 From: Johnson Shi Date: Thu, 25 Mar 2021 07:20:10 -0700 Subject: Azure helper: Ensure Azure http handler sleeps between retries (#842) Ensure that the Azure helper's http handler sleeps a fixed duration between retry failure attempts. The http handler will sleep a fixed duration between failed attempts regardless of whether the attempt failed due to (1) request timing out or (2) instant failure (no timeout). Due to certain platform issues, the http request to the Azure endpoint may instantly fail without reaching the http timeout duration. Without sleeping a fixed duration in between retry attempts, the http handler will loop through the max retry attempts quickly. This causes the communication between cloud-init and the Azure platform to be less resilient due to the short total duration if there is no sleep in between retries. 
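A minimal sketch of the retry shape this change enforces (the constant names match the helper in the diff below; the loop body is abbreviated):

    exc = None
    for attempt in range(1, max_readurl_attempts + 1):
        try:
            return url_helper.readurl(url, **kwargs)
        except Exception as e:
            exc = e
            # sleep whether the attempt timed out or failed instantly,
            # so retries are spaced by a predictable minimum interval
            time.sleep(sleep_duration_between_retries)
    raise exc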
--- cloudinit/sources/helpers/azure.py | 2 ++ tests/unittests/test_datasource/test_azure_helper.py | 11 +++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index d3055d08..03e7156b 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -303,6 +303,7 @@ def http_with_retries(url, **kwargs) -> str: max_readurl_attempts = 240 default_readurl_timeout = 5 + sleep_duration_between_retries = 5 periodic_logging_attempts = 12 if 'timeout' not in kwargs: @@ -338,6 +339,7 @@ def http_with_retries(url, **kwargs) -> str: 'attempt %d with exception: %s' % (url, attempt, e), logger_func=LOG.debug) + time.sleep(sleep_duration_between_retries) raise exc diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index b8899807..63482c6c 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -384,6 +384,7 @@ class TestAzureHelperHttpWithRetries(CiTestCase): max_readurl_attempts = 240 default_readurl_timeout = 5 + sleep_duration_between_retries = 5 periodic_logging_attempts = 12 def setUp(self): @@ -394,8 +395,8 @@ class TestAzureHelperHttpWithRetries(CiTestCase): self.m_readurl = patches.enter_context( mock.patch.object( azure_helper.url_helper, 'readurl', mock.MagicMock())) - patches.enter_context( - mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) + self.m_sleep = patches.enter_context( + mock.patch.object(azure_helper.time, 'sleep', autospec=True)) def test_http_with_retries(self): self.m_readurl.return_value = 'TestResp' @@ -438,6 +439,12 @@ class TestAzureHelperHttpWithRetries(CiTestCase): self.m_readurl.call_count, self.periodic_logging_attempts + 1) + # Ensure that cloud-init did sleep between each failed request + self.assertEqual( + self.m_sleep.call_count, + self.periodic_logging_attempts) + self.m_sleep.assert_called_with(self.sleep_duration_between_retries) + def test_http_with_retries_long_delay_logs_periodic_failure_msg(self): self.m_readurl.side_effect = \ [SentinelException] * self.periodic_logging_attempts + \ -- cgit v1.2.3 From 3b7e2e82310d417c0d59b268a6f47bc8f7996cab Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 29 Mar 2021 10:24:58 -0500 Subject: Fix mis-detecting network configuration in initramfs cmdline (#844) klibc initramfs in debian allows the 'iscsi_target_ip=' cmdline parameter to specify an iscsi device attachment. This can cause cloud-init to mis-detect the cmdline parameter as a networking config. 
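A minimal sketch of why token-wise matching avoids the false positive (using only the stdlib shlex module, as the diff below does):

    import shlex

    cmdline = "foo iscsi_target_ip=root=/dev/sda"
    "ip=" in cmdline                         # True: substring check mis-fires
    any(tok.startswith(("ip=", "ip6="))
        for tok in shlex.split(cmdline))     # False: no real ip= parameter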
LP: #1919188 --- cloudinit/net/cmdline.py | 6 ++++-- tests/unittests/test_net.py | 50 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 54 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index cc8dc17b..7cdd428d 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -12,6 +12,7 @@ import gzip import io import logging import os +import shlex from cloudinit import util @@ -72,8 +73,9 @@ class KlibcNetworkConfigSource(InitramfsNetworkConfigSource): (ii) an open-iscsi interface file is present in the system """ if self._files: - if 'ip=' in self._cmdline or 'ip6=' in self._cmdline: - return True + for item in shlex.split(self._cmdline): + if item.startswith('ip=') or item.startswith('ip6='): + return True if os.path.exists(_OPEN_ISCSI_INTERFACE_FILE): # iBft can configure networking without ip= return True diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index cb636f41..2bd50e72 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -4387,6 +4387,56 @@ class TestCmdlineKlibcNetworkConfigSource(FilesystemMockingTestCase): ) self.assertFalse(src.is_applicable()) + def test_with_faux_ip(self): + content = {'net6-eno1.conf': DHCP6_CONTENT_1} + files = sorted(populate_dir(self.tmp_dir(), content)) + src = cmdline.KlibcNetworkConfigSource( + _files=files, + _cmdline='foo iscsi_target_ip=root=/dev/sda', + _mac_addrs=self.macs, + ) + self.assertFalse(src.is_applicable()) + + def test_empty_cmdline(self): + content = {'net6-eno1.conf': DHCP6_CONTENT_1} + files = sorted(populate_dir(self.tmp_dir(), content)) + src = cmdline.KlibcNetworkConfigSource( + _files=files, + _cmdline='', + _mac_addrs=self.macs, + ) + self.assertFalse(src.is_applicable()) + + def test_whitespace_cmdline(self): + content = {'net6-eno1.conf': DHCP6_CONTENT_1} + files = sorted(populate_dir(self.tmp_dir(), content)) + src = cmdline.KlibcNetworkConfigSource( + _files=files, + _cmdline='          ', + _mac_addrs=self.macs, + ) + self.assertFalse(src.is_applicable()) + + def test_cmdline_no_lhand(self): + content = {'net6-eno1.conf': DHCP6_CONTENT_1} + files = sorted(populate_dir(self.tmp_dir(), content)) + src = cmdline.KlibcNetworkConfigSource( + _files=files, + _cmdline='=wut', + _mac_addrs=self.macs, + ) + self.assertFalse(src.is_applicable()) + + def test_cmdline_embedded_ip(self): + content = {'net6-eno1.conf': DHCP6_CONTENT_1} + files = sorted(populate_dir(self.tmp_dir(), content)) + src = cmdline.KlibcNetworkConfigSource( + _files=files, + _cmdline='opt="some things and ip=foo"', + _mac_addrs=self.macs, + ) + self.assertFalse(src.is_applicable()) + def test_with_both_ip_ip6(self): content = { '/run/net-eth0.conf': DHCP_CONTENT_1, -- cgit v1.2.3 From 74fa008bfcd3263eb691cc0b3f7a055b17569f8b Mon Sep 17 00:00:00 2001 From: Eduardo Otubo Date: Tue, 30 Mar 2021 18:08:25 +0200 Subject: Add support to resize rootfs if using LVM (#721) This patch adds support to resize a single partition of a VM if it's using LVM underneath. The patch detects LVM by checking whether the given block device is a device mapper (by its name, e.g. `/dev/dm-1`) and whether it has slave devices under it in sysfs. After that, syspath is updated to the real block device and growpart is called to resize it (and automatically its Physical Volume). The Volume Group is updated automatically and a final call extends the rootfs to the remaining available space. 
Using the same growpart configuration, the user can specify only one device to be resized when using LVM and growpart; otherwise cloud-init won't know which one should be resized and will fail. rhbz: #1810878 LP: #1799953 Signed-off-by: Eduardo Otubo Signed-off-by: Scott Moser --- cloudinit/config/cc_growpart.py | 83 +++++++++++++++++++++- doc/examples/cloud-config-growpart.txt | 2 + .../test_handler/test_handler_growpart.py | 56 ++++++++++++++- 3 files changed, 137 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 9f338ad1..6399bfb7 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -68,7 +68,9 @@ import os import os.path import re import stat +import platform +from functools import lru_cache from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import subp @@ -93,6 +95,58 @@ class RESIZE(object): LOG = logging.getLogger(__name__) +@lru_cache() +def is_lvm_lv(devpath): + if util.is_Linux(): + # all lvm lvs will have a realpath as a 'dm-*' name. + rpath = os.path.realpath(devpath) + if not os.path.basename(rpath).startswith("dm-"): + return False + out, _ = subp.subp("udevadm", "info", devpath) + # lvs should have DM_LV_NAME= and also DM_VG_NAME + return 'DM_LV_NAME=' in out + else: + LOG.info("Not an LVM Logical Volume partition") + return False + + +@lru_cache() +def get_pvs_for_lv(devpath): + myenv = {'LANG': 'C'} + + if not util.is_Linux(): + LOG.info("No support for LVM on %s", platform.system()) + return None + if not subp.which('lvm'): + LOG.info("No 'lvm' command present") + return None + + try: + (out, _err) = subp.subp(["lvm", "lvs", devpath, "--options=vgname", + "--noheadings"], update_env=myenv) + vgname = out.strip() + except subp.ProcessExecutionError as e: + if e.exit_code != 0: + util.logexc(LOG, "Failed: can't get Volume Group information " + "from %s", devpath) + raise ResizeFailedException(e) from e + + try: + (out, _err) = subp.subp(["lvm", "vgs", vgname, "--options=pvname", + "--noheadings"], update_env=myenv) + pvs = [p.strip() for p in out.splitlines()] + if len(pvs) > 1: + LOG.info("Do not know how to resize multiple Physical" + " Volumes") + else: + return pvs[0] + except subp.ProcessExecutionError as e: + if e.exit_code != 0: + util.logexc(LOG, "Failed: can't get Physical Volume " + "information from Volume Group %s", vgname) + raise ResizeFailedException(e) from e + + def resizer_factory(mode): resize_class = None if mode == "auto": @@ -208,13 +262,18 @@ def get_size(filename): os.close(fd) -def device_part_info(devpath): +def device_part_info(devpath, is_lvm): # convert an entry in /dev/ to parent disk and partition number # input of /dev/vdb or /dev/disk/by-label/foo # rpath is hopefully a real-ish path in /dev (vda, sdb..) 
rpath = os.path.realpath(devpath) + # first check if this is an LVM and get its PVs + lvm_rpath = get_pvs_for_lv(devpath) + if is_lvm and lvm_rpath: + rpath = lvm_rpath + bname = os.path.basename(rpath) syspath = "/sys/class/block/%s" % bname @@ -244,7 +303,7 @@ def device_part_info(devpath): # diskdevpath has something like 253:0 # and udev has put links in /dev/block/253:0 to the device name in /dev/ - return (diskdevpath, ptnum) + return diskdevpath, ptnum def devent2dev(devent): @@ -294,8 +353,9 @@ def resize_devices(resizer, devices): "device '%s' not a block device" % blockdev,)) continue + is_lvm = is_lvm_lv(blockdev) try: - (disk, ptnum) = device_part_info(blockdev) + disk, ptnum = device_part_info(blockdev, is_lvm) except (TypeError, ValueError) as e: info.append((devent, RESIZE.SKIPPED, "device_part_info(%s) failed: %s" % (blockdev, e),)) @@ -316,6 +376,23 @@ def resize_devices(resizer, devices): "failed to resize: disk=%s, ptnum=%s: %s" % (disk, ptnum, e),)) + if is_lvm and isinstance(resizer, ResizeGrowPart): + try: + if len(devices) == 1: + (_out, _err) = subp.subp( + ["lvm", "lvextend", "--extents=100%FREE", blockdev], + update_env={'LANG': 'C'}) + info.append((devent, RESIZE.CHANGED, + "Logical Volume %s extended" % devices[0],)) + else: + LOG.info("Exactly one device should be configured to be " + "resized when using LVM. More than one configured" + ": %s", devices) + except (subp.ProcessExecutionError, ValueError) as e: + info.append((devent, RESIZE.NOCHANGE, + "Logical Volume %s resize failed: %s" % + (blockdev, e),)) + return info diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt index 393d5164..09268117 100644 --- a/doc/examples/cloud-config-growpart.txt +++ b/doc/examples/cloud-config-growpart.txt @@ -13,6 +13,8 @@ # # devices: # a list of things to resize. +# if the devices are under LVM, the list should be a single entry, +# cloud-init will then extend the single entry, otherwise it will fail. # items can be filesystem paths or devices (in /dev) # examples: # devices: [/, /dev/vdb1] diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 7f039b79..cc0a9248 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -172,6 +172,53 @@ class TestResize(unittest.TestCase): self.name = "growpart" self.log = logging.getLogger("TestResize") + def test_lvm_resize(self): + # LVM resize should work only if a single device is configured. More + # than one device should fail. 
+ lvm_pass = ["/dev/XXdm-0"] + lvm_fail = ["/dev/XXdm-1", "/dev/YYdm-1"] + devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5, + st_nlink=1, st_uid=0, st_gid=6, st_size=0, + st_atime=0, st_mtime=0, st_ctime=0) + real_stat = os.stat + resize_calls = [] + + class myresizer(object): + def resize(self, diskdev, partnum, partdev): + resize_calls.append((diskdev, partnum, partdev)) + if partdev == "/dev/XXdm-0": + return (1024, 2048) + return (1024, 1024) # old size, new size + + def mystat(path): + if path in lvm_pass or path in lvm_fail: + return devstat_ret + return real_stat(path) + + try: + opinfo = cc_growpart.device_part_info + cc_growpart.device_part_info = simple_device_part_info_lvm + os.stat = mystat + + resized = cc_growpart.resize_devices(myresizer(), lvm_pass) + not_resized = cc_growpart.resize_devices(myresizer(), lvm_fail) + + def find(name, res): + for f in res: + if f[0] == name: + return f + return None + + self.assertEqual(cc_growpart.RESIZE.CHANGED, + find("/dev/XXdm-0", resized)[1]) + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, + find("/dev/XXdm-1", not_resized)[1]) + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, + find("/dev/YYdm-1", not_resized)[1]) + finally: + cc_growpart.device_part_info = opinfo + os.stat = real_stat + def test_simple_devices(self): # test simple device list # this patches out devent2dev, os.stat, and device_part_info @@ -227,7 +274,14 @@ class TestResize(unittest.TestCase): os.stat = real_stat -def simple_device_part_info(devpath): +def simple_device_part_info_lvm(devpath, is_lvm): + # simple stupid return (/dev/vda, 1) for /dev/vda + ret = re.search("([^0-9]*)([0-9]*)$", devpath) + x = (ret.group(1), ret.group(2)) + return x + + +def simple_device_part_info(devpath, is_lvm): # simple stupid return (/dev/vda, 1) for /dev/vda ret = re.search("([^0-9]*)([0-9]*)$", devpath) x = (ret.group(1), ret.group(2)) -- cgit v1.2.3 From 15dd3601c484e189ea82917600322b7d0e25c088 Mon Sep 17 00:00:00 2001 From: Petr Fedchenkov Date: Wed, 7 Apr 2021 19:16:30 +0300 Subject: bringup_static_routes: fix gateway check (#850) When bringing up DHCP-provided static routes, we check for "0.0.0.0/0" to indicate an unspecified gateway. However, when parsing the static route in `parse_static_routes`, the gateway is never specified with a net length, so the "/0" will never happen. This change updates the gateway check to check only for "0.0.0.0". 
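A minimal sketch of the mismatch (the route tuple shape is what parse_static_routes returns in this tree; the subp call that follows is omitted):

    static_routes = [("169.254.169.254/32", "0.0.0.0")]  # gateway carries no "/0"
    for net_address, gateway in static_routes:
        via_arg = []
        if gateway != "0.0.0.0":  # the old compare against "0.0.0.0/0" never matched
            via_arg = ["via", gateway]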
--- cloudinit/net/__init__.py | 2 +- cloudinit/net/tests/test_dhcp.py | 5 +++++ cloudinit/net/tests/test_init.py | 15 +++++++++++---- tools/.github-cla-signers | 1 + 4 files changed, 18 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 385b7bcc..6b3b84f7 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -1135,7 +1135,7 @@ class EphemeralIPv4Network(object): # ("0.0.0.0/0", "130.56.240.1")] for net_address, gateway in self.static_routes: via_arg = [] - if gateway != "0.0.0.0/0": + if gateway != "0.0.0.0": via_arg = ['via', gateway] subp.subp( ['ip', '-4', 'route', 'add', net_address] + via_arg + diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index 74cf4b94..6f9a02de 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -194,6 +194,11 @@ class TestDHCPParseStaticRoutes(CiTestCase): self.assertEqual([('0.0.0.0/0', '130.56.240.1')], parse_static_routes(rfc3442)) + def test_unspecified_gateway(self): + rfc3442 = "32,169,254,169,254,0,0,0,0" + self.assertEqual([('169.254.169.254/32', '0.0.0.0')], + parse_static_routes(rfc3442)) + def test_parse_static_routes_class_c_b_a(self): class_c = "24,192,168,74,192,168,0,4" class_b = "16,172,16,172,16,0,4" diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py index 946f8ee2..ad9c90ff 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -706,18 +706,22 @@ class TestEphemeralIPV4Network(CiTestCase): def test_ephemeral_ipv4_network_with_rfc3442_static_routes(self, m_subp): params = { 'interface': 'eth0', 'ip': '192.168.2.2', - 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', - 'static_routes': [('169.254.169.254/32', '192.168.2.1'), + 'prefix_or_mask': '255.255.255.255', 'broadcast': '192.168.2.255', + 'static_routes': [('192.168.2.1/32', '0.0.0.0'), + ('169.254.169.254/32', '192.168.2.1'), ('0.0.0.0/0', '192.168.2.1')], 'router': '192.168.2.1'} expected_setup_calls = [ mock.call( - ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/24', + ['ip', '-family', 'inet', 'addr', 'add', '192.168.2.2/32', 'broadcast', '192.168.2.255', 'dev', 'eth0'], capture=True, update_env={'LANG': 'C'}), mock.call( ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], capture=True), + mock.call( + ['ip', '-4', 'route', 'add', '192.168.2.1/32', + 'dev', 'eth0'], capture=True), mock.call( ['ip', '-4', 'route', 'add', '169.254.169.254/32', 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), @@ -731,12 +735,15 @@ class TestEphemeralIPV4Network(CiTestCase): mock.call( ['ip', '-4', 'route', 'del', '169.254.169.254/32', 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), + mock.call( + ['ip', '-4', 'route', 'del', '192.168.2.1/32', + 'dev', 'eth0'], capture=True), mock.call( ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'down'], capture=True), mock.call( ['ip', '-family', 'inet', 'addr', 'del', - '192.168.2.2/24', 'dev', 'eth0'], capture=True) + '192.168.2.2/32', 'dev', 'eth0'], capture=True) ] with net.EphemeralIPv4Network(**params): self.assertEqual(expected_setup_calls, m_subp.call_args_list) diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 0a573f9e..b39f4198 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -16,6 +16,7 @@ dhensby eandersson eb3095 emmanuelthome +giggsoff izzyleung johnsonshi jordimassaguerpla -- cgit v1.2.3 From fb38aa591bca82b7aa89187695a2bff676d687cb 
Mon Sep 17 00:00:00 2001 From: Jens Sandmann Date: Fri, 9 Apr 2021 18:05:43 +0200 Subject: sysconfig: use BONDING_MODULE_OPTS on SUSE (#831) Update sysconfig configuration to use BONDING_MODULE_OPTS instead of BONDING_OPTS when on a SUSE system. The sysconfig support requires use of BONDING_MODULE_OPTS whereas the initscript support that rhel uses requires BONDING_OPTS. --- cloudinit/net/sysconfig.py | 20 ++++++++++++++++---- tests/unittests/test_net.py | 4 ++-- 2 files changed, 18 insertions(+), 6 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 99a4bae4..e4607804 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -313,7 +313,8 @@ class Renderer(renderer.Renderer): } # If these keys exist, then their values will be used to form - # a BONDING_OPTS grouping; otherwise no grouping will be set. + # a BONDING_OPTS / BONDING_MODULE_OPTS grouping; otherwise no + # grouping will be set. bond_tpl_opts = tuple([ ('bond_mode', "mode=%s"), ('bond_xmit_hash_policy', "xmit_hash_policy=%s"), @@ -622,7 +623,7 @@ class Renderer(renderer.Renderer): route_cfg[new_key] = route[old_key] @classmethod - def _render_bonding_opts(cls, iface_cfg, iface): + def _render_bonding_opts(cls, iface_cfg, iface, flavor): bond_opts = [] for (bond_key, value_tpl) in cls.bond_tpl_opts: # Seems like either dash or underscore is possible? @@ -635,7 +636,18 @@ class Renderer(renderer.Renderer): bond_opts.append(value_tpl % (bond_value)) break if bond_opts: - iface_cfg['BONDING_OPTS'] = " ".join(bond_opts) + if flavor == 'suse': + # suse uses the sysconfig support which requires + # BONDING_MODULE_OPTS see + # https://www.kernel.org/doc/Documentation/networking/bonding.txt + # 3.1 Configuration with Sysconfig Support + iface_cfg['BONDING_MODULE_OPTS'] = " ".join(bond_opts) + else: + # rhel uses initscript support and thus requires BONDING_OPTS + # this is also the old default see + # https://www.kernel.org/doc/Documentation/networking/bonding.txt + # 3.2 Configuration with Initscripts Support + iface_cfg['BONDING_OPTS'] = " ".join(bond_opts) @classmethod def _render_physical_interfaces( @@ -663,7 +675,7 @@ class Renderer(renderer.Renderer): for iface in network_state.iter_interfaces(bond_filter): iface_name = iface['name'] iface_cfg = iface_contents[iface_name] - cls._render_bonding_opts(iface_cfg, iface) + cls._render_bonding_opts(iface_cfg, iface, flavor) # Ensure that the master interface (and any of its children) # are actually marked as being bond types... 
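# A minimal sketch of the flavor switch rendered above (the option
# string is illustrative, not taken from a real configuration):
opts = "mode=active-backup miimon=100"
if flavor == 'suse':
    iface_cfg['BONDING_MODULE_OPTS'] = opts  # sysconfig support
else:
    iface_cfg['BONDING_OPTS'] = opts  # initscripts support, as on rhel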
diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 2bd50e72..b72a62b8 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1654,7 +1654,7 @@ pre-down route del -net 10.0.0.0/8 gw 11.0.0.1 metric 3 || true 'expected_sysconfig_opensuse': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup """ + BONDING_MODULE_OPTS="mode=active-backup """ """xmit_hash_policy=layer3+4 """ """miimon=100" BONDING_SLAVE_0=eth1 @@ -2240,7 +2240,7 @@ iface bond0 inet6 static 'expected_sysconfig_opensuse': { 'ifcfg-bond0': textwrap.dedent("""\ BONDING_MASTER=yes - BONDING_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ + BONDING_MODULE_OPTS="mode=active-backup xmit_hash_policy=layer3+4 """ """miimon=100 num_grat_arp=5 """ """downdelay=10 updelay=20 """ """fail_over_mac=active """ -- cgit v1.2.3 From 83f6bbfbe5b924be61a3c098f4202377d69c8947 Mon Sep 17 00:00:00 2001 From: lucasmoura Date: Mon, 12 Apr 2021 13:22:22 -0300 Subject: Fix unpickle for source paths missing run_dir (#863) On the datasource class, we require the use of paths.run_dir to perform some operations. On older cloud-init versions, the Paths class does not have the run_dir attribute. To fix that, we are now manually adding that attribute in the Paths object if it doesn't exist in the unpickle operation. LP: #1899299 --- cloudinit/helpers.py | 17 +- cloudinit/tests/test_upgrade.py | 3 + tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl | 504 ++++++++++++++++++++++++ 3 files changed, 523 insertions(+), 1 deletion(-) create mode 100644 tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl (limited to 'cloudinit') diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index fc5011ec..b8f9d2c3 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -20,6 +20,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, from cloudinit import log as logging from cloudinit import type_utils +from cloudinit import persistence from cloudinit import util LOG = logging.getLogger(__name__) @@ -317,7 +318,9 @@ class ContentHandlers(object): return list(self.registered.items()) -class Paths(object): +class Paths(persistence.CloudInitPickleMixin): + _ci_pkl_version = 1 + def __init__(self, path_cfgs, ds=None): self.cfgs = path_cfgs # Populate all the initial paths @@ -354,6 +357,18 @@ class Paths(object): # Set when a datasource becomes active self.datasource = ds + def _unpickle(self, ci_pkl_version: int) -> None: + """Perform deserialization fixes for Paths.""" + if not hasattr(self, "run_dir"): + # On older versions of cloud-init the Paths class do not + # have the run_dir attribute. This is problematic because + # when loading the pickle object on newer versions of cloud-init + # we will rely on this attribute. To fix that, we are now + # manually adding that attribute here. 
+ self.run_dir = Paths( + path_cfgs=self.cfgs, + ds=self.datasource).run_dir + # get_ipath_cur: get the current instance path for an item def get_ipath_cur(self, name=None): return self._get_path(self.instance_link, name) diff --git a/cloudinit/tests/test_upgrade.py b/cloudinit/tests/test_upgrade.py index f79a2536..05eefd2a 100644 --- a/cloudinit/tests/test_upgrade.py +++ b/cloudinit/tests/test_upgrade.py @@ -43,3 +43,6 @@ class TestUpgrade: def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl): """We always expect Networking.blacklist_drivers to be initialised.""" assert previous_obj_pkl.distro.networking.blacklist_drivers is None + + def test_paths_has_run_dir_attribute(self, previous_obj_pkl): + assert previous_obj_pkl.paths.run_dir is not None diff --git a/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl b/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl new file mode 100644 index 00000000..c7d7844b --- /dev/null +++ b/tests/data/old_pickles/trusty-14.04.1-0.7.5.pkl @@ -0,0 +1,504 @@ +ccopy_reg +_reconstructor +p1 +(ccloudinit.sources.DataSourceNoCloud +DataSourceNoCloudNet +p2 +c__builtin__ +object +p3 +NtRp4 +(dp5 +S'paths' +p6 +g1 +(ccloudinit.helpers +Paths +p7 +g3 +NtRp8 +(dp9 +S'lookups' +p10 +(dp11 +S'cloud_config' +p12 +S'cloud-config.txt' +p13 +sS'userdata' +p14 +S'user-data.txt.i' +p15 +sS'vendordata' +p16 +S'vendor-data.txt.i' +p17 +sS'userdata_raw' +p18 +S'user-data.txt' +p19 +sS'boothooks' +p20 +g20 +sS'scripts' +p21 +g21 +sS'sem' +p22 +g22 +sS'data' +p23 +g23 +sS'vendor_scripts' +p24 +S'scripts/vendor' +p25 +sS'handlers' +p26 +g26 +sS'obj_pkl' +p27 +S'obj.pkl' +p28 +sS'vendordata_raw' +p29 +S'vendor-data.txt' +p30 +sS'vendor_cloud_config' +p31 +S'vendor-cloud-config.txt' +p32 +ssS'template_tpl' +p33 +S'/etc/cloud/templates/%s.tmpl' +p34 +sS'cfgs' +p35 +(dp36 +S'cloud_dir' +p37 +S'/var/lib/cloud/' +p38 +sS'templates_dir' +p39 +S'/etc/cloud/templates/' +p40 +sS'upstart_dir' +p41 +S'/etc/init/' +p42 +ssS'cloud_dir' +p43 +g38 +sS'datasource' +p44 +NsS'upstart_conf_d' +p45 +g42 +sS'boot_finished' +p46 +S'/var/lib/cloud/instance/boot-finished' +p47 +sS'instance_link' +p48 +S'/var/lib/cloud/instance' +p49 +sS'seed_dir' +p50 +S'/var/lib/cloud/seed' +p51 +sbsS'supported_seed_starts' +p52 +(S'http://' +S'https://' +S'ftp://' +tp53 +sS'sys_cfg' +p54 +(dp55 +S'output' +p56 +(dp57 +S'all' +p58 +S'| tee -a /var/log/cloud-init-output.log' +p59 +ssS'users' +p60 +(lp61 +S'default' +p62 +asS'def_log_file' +p63 +S'/var/log/cloud-init.log' +p64 +sS'cloud_final_modules' +p65 +(lp66 +S'rightscale_userdata' +p67 +aS'scripts-vendor' +p68 +aS'scripts-per-once' +p69 +aS'scripts-per-boot' +p70 +aS'scripts-per-instance' +p71 +aS'scripts-user' +p72 +aS'ssh-authkey-fingerprints' +p73 +aS'keys-to-console' +p74 +aS'phone-home' +p75 +aS'final-message' +p76 +aS'power-state-change' +p77 +asS'disable_root' +p78 +I01 +sS'syslog_fix_perms' +p79 +S'syslog:adm' +p80 +sS'log_cfgs' +p81 +(lp82 +(lp83 +S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n' +p84 
+aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n' +p85 +aa(lp86 +g84 +aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n" +p87 +aasS'cloud_init_modules' +p88 +(lp89 +S'migrator' +p90 +aS'seed_random' +p91 +aS'bootcmd' +p92 +aS'write-files' +p93 +aS'growpart' +p94 +aS'resizefs' +p95 +aS'set_hostname' +p96 +aS'update_hostname' +p97 +aS'update_etc_hosts' +p98 +aS'ca-certs' +p99 +aS'rsyslog' +p100 +aS'users-groups' +p101 +aS'ssh' +p102 +asS'preserve_hostname' +p103 +I00 +sS'_log' +p104 +(lp105 +g84 +ag87 +ag85 +asS'datasource_list' +p106 +(lp107 +S'NoCloud' +p108 +aS'ConfigDrive' +p109 +aS'OpenNebula' +p110 +aS'Azure' +p111 +aS'AltCloud' +p112 +aS'OVF' +p113 +aS'MAAS' +p114 +aS'GCE' +p115 +aS'OpenStack' +p116 +aS'CloudSigma' +p117 +aS'Ec2' +p118 +aS'CloudStack' +p119 +aS'SmartOS' +p120 +aS'None' +p121 +asS'vendor_data' +p122 +(dp123 +S'prefix' +p124 +(lp125 +sS'enabled' +p126 +I01 +ssS'cloud_config_modules' +p127 +(lp128 +S'emit_upstart' +p129 +aS'disk_setup' +p130 +aS'mounts' +p131 +aS'ssh-import-id' +p132 +aS'locale' +p133 +aS'set-passwords' +p134 +aS'grub-dpkg' +p135 +aS'apt-pipelining' +p136 +aS'apt-configure' +p137 +aS'package-update-upgrade-install' +p138 +aS'landscape' +p139 +aS'timezone' +p140 +aS'puppet' +p141 +aS'chef' +p142 +aS'salt-minion' +p143 +aS'mcollective' +p144 +aS'disable-ec2-metadata' +p145 +aS'runcmd' +p146 +aS'byobu' +p147 +assg14 +Nsg16 +Nsg18 +S'#cloud-config\n{}\n\n' +p148 +sg29 +S'#cloud-config\n{}\n\n' +p149 +sS'dsmode' +p150 +S'net' +p151 +sS'seed' +p152 +S'/var/lib/cloud/seed/nocloud-net' +p153 +sS'cmdline_id' +p154 +S'ds=nocloud-net' +p155 +sS'ud_proc' +p156 +g1 +(ccloudinit.user_data +UserDataProcessor +p157 +g3 +NtRp158 +(dp159 +g6 +g8 +sS'ssl_details' +p160 +(dp161 +sbsg50 +g153 +sS'ds_cfg' +p162 +(dp163 +sS'distro' +p164 +g1 +(ccloudinit.distros.ubuntu +Distro +p165 +g3 +NtRp166 +(dp167 +S'osfamily' +p168 +S'debian' +p169 +sS'_paths' +p170 +g8 +sS'name' +p171 +S'ubuntu' +p172 +sS'_runner' +p173 +g1 +(ccloudinit.helpers +Runners +p174 +g3 +NtRp175 +(dp176 +g6 +g8 +sS'sems' +p177 +(dp178 +sbsS'_cfg' +p179 +(dp180 +S'paths' +p181 +(dp182 +g37 +g38 +sg39 +g40 +sg41 +g42 +ssS'default_user' +p183 +(dp184 +S'shell' +p185 +S'/bin/bash' +p186 +sS'name' +p187 +S'ubuntu' +p188 +sS'sudo' +p189 +(lp190 +S'ALL=(ALL) NOPASSWD:ALL' +p191 +asS'lock_passwd' +p192 +I01 +sS'gecos' +p193 +S'Ubuntu' +p194 +sS'groups' +p195 +(lp196 +S'adm' +p197 +aS'audio' +p198 +aS'cdrom' +p199 +aS'dialout' +p200 +aS'dip' +p201 +aS'floppy' +p202 +aS'netdev' +p203 +aS'plugdev' +p204 +aS'sudo' +p205 +aS'video' +p206 +assS'package_mirrors' +p207 +(lp208 +(dp209 +S'arches' +p210 +(lp211 +S'i386' +p212 +aS'amd64' +p213 +asS'failsafe' +p214 +(dp215 +S'security' +p216 +S'http://security.ubuntu.com/ubuntu' +p217 +sS'primary' +p218 +S'http://archive.ubuntu.com/ubuntu' +p219 +ssS'search' +p220 +(dp221 +S'security' +p222 +(lp223 +sS'primary' +p224 +(lp225 +S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/' +p226 +aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/' +p227 +aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/' +p228 +assa(dp229 +S'arches' +p230 +(lp231 +S'armhf' +p232 +aS'armel' +p233 +aS'default' +p234 +asS'failsafe' +p235 +(dp236 +S'security' +p237 +S'http://ports.ubuntu.com/ubuntu-ports' +p238 +sS'primary' +p239 +S'http://ports.ubuntu.com/ubuntu-ports' +p240 +ssasS'ssh_svcname' +p241 +S'ssh' 
+p242 +ssbsS'metadata' +p243 +(dp244 +g150 +g151 +sS'local-hostname' +p245 +S'trusty-upgrade2' +p246 +sS'instance-id' +p247 +S'trusty-upgrade2' +p248 +ssb. \ No newline at end of file -- cgit v1.2.3 From 0ae0b1d4336acdcab12bd49e9bddb46922fb19c7 Mon Sep 17 00:00:00 2001 From: David Dymko Date: Tue, 13 Apr 2021 14:15:34 -0400 Subject: Add Vultr support (#827) This PR adds in support so that cloud-init can run on instances deployed on Vultr cloud. This was originally brought up in #628. Co-authored-by: Eric Benner --- README.md | 2 +- cloudinit/apport.py | 1 + cloudinit/settings.py | 1 + cloudinit/sources/DataSourceVultr.py | 147 +++++++++++ cloudinit/sources/helpers/vultr.py | 242 +++++++++++++++++ doc/rtd/topics/availability.rst | 1 + doc/rtd/topics/datasources.rst | 2 +- doc/rtd/topics/datasources/vultr.rst | 35 +++ doc/rtd/topics/network-config.rst | 5 + tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_vultr.py | 343 +++++++++++++++++++++++++ tools/.github-cla-signers | 1 + tools/ds-identify | 16 +- 13 files changed, 795 insertions(+), 3 deletions(-) create mode 100644 cloudinit/sources/DataSourceVultr.py create mode 100644 cloudinit/sources/helpers/vultr.py create mode 100644 doc/rtd/topics/datasources/vultr.rst create mode 100644 tests/unittests/test_datasource/test_vultr.py (limited to 'cloudinit') diff --git a/README.md b/README.md index 435405da..aa6d84ae 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)















| +| Alpine Linux
ArchLinux
Debian
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)















| ## To start developing cloud-init diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 25f254e3..aadc638f 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -41,6 +41,7 @@ KNOWN_CLOUD_NAMES = [ 'SmartOS', 'UpCloud', 'VMware', + 'Vultr', 'ZStack', 'Other' ] diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 91e1bfe7..23e4c0ad 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -30,6 +30,7 @@ CFG_BUILTIN = { 'GCE', 'OpenStack', 'AliYun', + 'Vultr', 'Ec2', 'CloudSigma', 'CloudStack', diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py new file mode 100644 index 00000000..c08ff848 --- /dev/null +++ b/cloudinit/sources/DataSourceVultr.py @@ -0,0 +1,147 @@ +# Author: Eric Benner +# +# This file is part of cloud-init. See LICENSE file for license information. + +# Vultr Metadata API: +# https://www.vultr.com/metadata/ + +from cloudinit import log as log +from cloudinit import sources +from cloudinit import util + +import cloudinit.sources.helpers.vultr as vultr + +LOG = log.getLogger(__name__) +BUILTIN_DS_CONFIG = { + 'url': 'http://169.254.169.254', + 'retries': 30, + 'timeout': 2, + 'wait': 2 +} + + +class DataSourceVultr(sources.DataSource): + + dsname = 'Vultr' + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceVultr, self).__init__(sys_cfg, distro, paths) + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}), + BUILTIN_DS_CONFIG]) + + # Initiate data and check if Vultr + def _get_data(self): + LOG.debug("Detecting if machine is a Vultr instance") + if not vultr.is_vultr(): + LOG.debug("Machine is not a Vultr instance") + return False + + LOG.debug("Machine is a Vultr instance") + + # Fetch metadata + md = self.get_metadata() + + self.metadata_full = md + self.metadata['instanceid'] = md['instanceid'] + self.metadata['local-hostname'] = md['hostname'] + self.metadata['public-keys'] = md["public-keys"] + self.userdata_raw = md["user-data"] + + # Generate config and process data + self.get_datasource_data(md) + + # Dump some data so diagnosing failures is manageable + LOG.debug("Vultr Vendor Config:") + LOG.debug(md['vendor-data']['config']) + LOG.debug("SUBID: %s", self.metadata['instanceid']) + LOG.debug("Hostname: %s", self.metadata['local-hostname']) + if self.userdata_raw is not None: + LOG.debug("User-Data:") + LOG.debug(self.userdata_raw) + + return True + + # Process metadata + def get_datasource_data(self, md): + # Grab config + config = md['vendor-data']['config'] + + # Generate network config + self.netcfg = vultr.generate_network_config(md['interfaces']) + + # This requires info generated in the vendor config + user_scripts = vultr.generate_user_scripts(md, self.netcfg['config']) + + # Default hostname is "guest" for whitelabel + if self.metadata['local-hostname'] == "": + self.metadata['local-hostname'] = "guest" + + self.userdata_raw = md["user-data"] + if self.userdata_raw == "": + self.userdata_raw = None + + # Assemble vendor-data + # This adds provided scripts and the config + self.vendordata_raw = [] + self.vendordata_raw.extend(user_scripts) + self.vendordata_raw.append("#cloud-config\n%s" % config) + + # Get the metadata by flag + def get_metadata(self): + return vultr.get_metadata(self.ds_cfg['url'], + self.ds_cfg['timeout'], + self.ds_cfg['retries'], + self.ds_cfg['wait']) + + # Compare subid as instance id + def check_instance_id(self, sys_cfg): + if not vultr.is_vultr(): + return False + + # Baremetal has no way to implement 
this in local
+        if vultr.is_baremetal():
+            return False
+
+        subid = vultr.get_sysinfo()['subid']
+        return sources.instance_id_matches_system_uuid(subid)
+
+    # Currently unsupported
+    @property
+    def launch_index(self):
+        return None
+
+    @property
+    def network_config(self):
+        return self.netcfg
+
+
+# Used to match classes to dependencies
+datasources = [
+    (DataSourceVultr, (sources.DEP_FILESYSTEM, )),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+    import sys
+
+    if not vultr.is_vultr():
+        print("Machine is not a Vultr instance")
+        sys.exit(1)
+
+    md = vultr.get_metadata(BUILTIN_DS_CONFIG['url'],
+                            BUILTIN_DS_CONFIG['timeout'],
+                            BUILTIN_DS_CONFIG['retries'],
+                            BUILTIN_DS_CONFIG['wait'])
+    config = md['vendor-data']['config']
+    sysinfo = vultr.get_sysinfo()
+
+    print(util.json_dumps(sysinfo))
+    print(config)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
new file mode 100644
index 00000000..c22cd0b1
--- /dev/null
+++ b/cloudinit/sources/helpers/vultr.py
@@ -0,0 +1,242 @@
+# Author: Eric Benner
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import log as log
+from cloudinit import url_helper
+from cloudinit import dmi
+from cloudinit import util
+from cloudinit import net
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from functools import lru_cache
+
+# Get LOG
+LOG = log.getLogger(__name__)
+
+
+@lru_cache()
+def get_metadata(url, timeout, retries, sec_between):
+    # Bring up interface
+    try:
+        with EphemeralDHCPv4(connectivity_url=url):
+            # Fetch the metadata
+            v1 = read_metadata(url, timeout, retries, sec_between)
+    except (NoDHCPLeaseError) as exc:
+        LOG.error("Bailing, DHCP Exception: %s", exc)
+        raise
+
+    v1_json = json.loads(v1)
+    metadata = v1_json
+
+    return metadata
+
+
+# Read the system information from SMBIOS
+def get_sysinfo():
+    return {
+        'manufacturer': dmi.read_dmi_data("system-manufacturer"),
+        'subid': dmi.read_dmi_data("system-serial-number")
+    }
+
+
+# Assumes Vultr has already been confirmed via is_vultr()
+def is_baremetal():
+    if get_sysinfo()['manufacturer'] != "Vultr":
+        return True
+    return False
+
+
+# Confirm that this is a Vultr instance
+def is_vultr():
+    # VC2, VDC, and HFC use DMI
+    sysinfo = get_sysinfo()
+
+    if sysinfo['manufacturer'] == "Vultr":
+        return True
+
+    # Baremetal requires a kernel parameter
+    if "vultr" in util.get_cmdline().split():
+        return True
+
+    return False
+
+
+# Read Metadata endpoint
+def read_metadata(url, timeout, retries, sec_between):
+    url = "%s/v1.json" % url
+    response = url_helper.readurl(url,
+                                  timeout=timeout,
+                                  retries=retries,
+                                  headers={'Metadata-Token': 'vultr'},
+                                  sec_between=sec_between)
+
+    if not response.ok():
+        # The format arguments must be wrapped in a tuple; without the
+        # parentheses only `url` is interpolated and the call raises a
+        # TypeError instead of the intended RuntimeError.
+        raise RuntimeError("Failed to connect to %s: Code: %s" %
+                           (url, response.code))
+
+    return response.contents.decode()
+
+
+# Wrapped for caching
+@lru_cache()
+def get_interface_map():
+    return net.get_interfaces_by_mac()
+
+
+# Convert macs to nics
+def get_interface_name(mac):
+    macs_to_nic = get_interface_map()
+
+    if mac not in macs_to_nic:
+        return None
+
+    return macs_to_nic.get(mac)
+
+
+# Generate network configs
+def generate_network_config(interfaces):
+    network = {
+        "version": 1,
+        "config": [
+            {
+                "type": "nameserver",
+                "address": [
+                    "108.61.10.10"
+                ]
+            }
+        ]
+    }
+
+    # Prepare interface 0, public
+    if len(interfaces) > 0:
+        public
= generate_public_network_interface(interfaces[0]) + network['config'].append(public) + + # Prepare interface 1, private + if len(interfaces) > 1: + private = generate_private_network_interface(interfaces[1]) + network['config'].append(private) + + return network + + +# Input Metadata and generate public network config part +def generate_public_network_interface(interface): + interface_name = get_interface_name(interface['mac']) + if not interface_name: + raise RuntimeError( + "Interface: %s could not be found on the system" % + interface['mac']) + + netcfg = { + "name": interface_name, + "type": "physical", + "mac_address": interface['mac'], + "accept-ra": 1, + "subnets": [ + { + "type": "dhcp", + "control": "auto" + }, + { + "type": "dhcp6", + "control": "auto" + }, + ] + } + + # Check for additional IP's + additional_count = len(interface['ipv4']['additional']) + if "ipv4" in interface and additional_count > 0: + for additional in interface['ipv4']['additional']: + add = { + "type": "static", + "control": "auto", + "address": additional['address'], + "netmask": additional['netmask'] + } + netcfg['subnets'].append(add) + + # Check for additional IPv6's + additional_count = len(interface['ipv6']['additional']) + if "ipv6" in interface and additional_count > 0: + for additional in interface['ipv6']['additional']: + add = { + "type": "static6", + "control": "auto", + "address": additional['address'], + "netmask": additional['netmask'] + } + netcfg['subnets'].append(add) + + # Add config to template + return netcfg + + +# Input Metadata and generate private network config part +def generate_private_network_interface(interface): + interface_name = get_interface_name(interface['mac']) + if not interface_name: + raise RuntimeError( + "Interface: %s could not be found on the system" % + interface['mac']) + + netcfg = { + "name": interface_name, + "type": "physical", + "mac_address": interface['mac'], + "accept-ra": 1, + "subnets": [ + { + "type": "static", + "control": "auto", + "address": interface['ipv4']['address'], + "netmask": interface['ipv4']['netmask'] + } + ] + } + + return netcfg + + +# This is for the vendor and startup scripts +def generate_user_scripts(md, network_config): + user_scripts = [] + + # Raid 1 script + if md['vendor-data']['raid1-script']: + user_scripts.append(md['vendor-data']['raid1-script']) + + # Enable multi-queue on linux + if util.is_Linux() and md['vendor-data']['ethtool-script']: + ethtool_script = md['vendor-data']['ethtool-script'] + + # Tool location + tool = "/opt/vultr/ethtool" + + # Go through the interfaces + for netcfg in network_config: + # If the interface has a mac and is physical + if "mac_address" in netcfg and netcfg['type'] == "physical": + # Set its multi-queue to num of cores as per RHEL Docs + name = netcfg['name'] + command = "%s -L %s combined $(nproc --all)" % (tool, name) + ethtool_script = '%s\n%s' % (ethtool_script, command) + + user_scripts.append(ethtool_script) + + # This is for vendor scripts + if md['vendor-data']['vendor-script']: + user_scripts.append(md['vendor-data']['vendor-script']) + + # Startup script + script = md['startup-script'] + if script and script != "echo No configured startup script": + user_scripts.append(script) + + return user_scripts + + +# vi: ts=4 expandtab diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index f58b2b38..f3e13edc 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -56,6 +56,7 @@ environments in the public cloud: - AltCloud - 
SmartOS
 - UpCloud
+- Vultr
 
 Additionally, cloud-init is supported on these private clouds:
 
diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst
index 228173d2..497b1467 100644
--- a/doc/rtd/topics/datasources.rst
+++ b/doc/rtd/topics/datasources.rst
@@ -49,7 +49,7 @@ The following is a list of documents for each supported datasource:
    datasources/smartos.rst
    datasources/upcloud.rst
    datasources/zstack.rst
-
+   datasources/vultr.rst
 
 Creation
 ========
diff --git a/doc/rtd/topics/datasources/vultr.rst b/doc/rtd/topics/datasources/vultr.rst
new file mode 100644
index 00000000..e73406a8
--- /dev/null
+++ b/doc/rtd/topics/datasources/vultr.rst
@@ -0,0 +1,35 @@
+.. _datasource_vultr:
+
+Vultr
+=====
+
+The `Vultr`_ datasource retrieves basic configuration values from the locally
+accessible `metadata service`_. All data is served over HTTP from the address
+169.254.169.254. The endpoints are documented at
+`https://www.vultr.com/metadata/ <https://www.vultr.com/metadata/>`_
+
+Configuration
+-------------
+
+Vultr's datasource can be configured as follows:
+
+  datasource:
+    Vultr:
+      url: 'http://169.254.169.254'
+      retries: 3
+      timeout: 2
+      wait: 2
+
+- *url*: The URL used to acquire the metadata configuration
+- *retries*: Determines the number of times to attempt to connect to the
+  metadata service
+- *timeout*: Determines the timeout in seconds to wait for a response from
+  the metadata service
+- *wait*: Determines the time in seconds to wait before retrying after a
+  failed attempt
+
+.. _Vultr: https://www.vultr.com/
+.. _metadata service: https://www.vultr.com/metadata/
+
+.. vi: textwidth=78
diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst
index 07cad765..5f7a74f8 100644
--- a/doc/rtd/topics/network-config.rst
+++ b/doc/rtd/topics/network-config.rst
@@ -148,6 +148,10 @@ The following Datasources optionally provide network configuration:
 
   - `UpCloud JSON metadata`_
 
+- :ref:`datasource_vultr`
+
+  - `Vultr JSON metadata`_
+
 For more information on network configuration formats
 
 .. toctree::
@@ -262,5 +266,6 @@ Example output converting V2 to sysconfig:
 .. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html
 .. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html
 .. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service
+.. _Vultr JSON metadata: https://www.vultr.com/metadata/
 
 .. vi: textwidth=78
diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py
index 5912f7ee..5e9c547a 100644
--- a/tests/unittests/test_datasource/test_common.py
+++ b/tests/unittests/test_datasource/test_common.py
@@ -28,6 +28,7 @@ from cloudinit.sources import (
     DataSourceScaleway as Scaleway,
     DataSourceSmartOS as SmartOS,
     DataSourceUpCloud as UpCloud,
+    DataSourceVultr as Vultr,
 )
 from cloudinit.sources import DataSourceNone as DSNone
 
@@ -45,6 +46,7 @@ DEFAULT_LOCAL = [
     Oracle.DataSourceOracle,
     OVF.DataSourceOVF,
     SmartOS.DataSourceSmartOS,
+    Vultr.DataSourceVultr,
     Ec2.DataSourceEc2Local,
     OpenStack.DataSourceOpenStackLocal,
     RbxCloud.DataSourceRbxCloud,
diff --git a/tests/unittests/test_datasource/test_vultr.py b/tests/unittests/test_datasource/test_vultr.py
new file mode 100644
index 00000000..bbea2aa3
--- /dev/null
+++ b/tests/unittests/test_datasource/test_vultr.py
@@ -0,0 +1,343 @@
+# Author: Eric Benner
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+ +# Vultr Metadata API: +# https://www.vultr.com/metadata/ + +import json + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceVultr +from cloudinit.sources.helpers import vultr + +from cloudinit.tests.helpers import mock, CiTestCase + +# Vultr metadata test data +VULTR_V1_1 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_1', + 'instanceid': '42506325', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address': '108.61.89.242', + 'gateway': '108.61.89.1', + 'netmask': '255.255.255.0' + }, + 'ipv6': { + 'additional': [ + ], + 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465', + 'network': '2001:19f0:5:56c2::', + 'prefix': '64' + }, + 'mac': '56:00:03:15:c4:65', + 'network-type': 'public' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'raid1-script': '', + 'user-data': [ + ], + 'vendor-data': { + 'vendor-script': '', + 'ethtool-script': '', + 'config': { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + } +} + +VULTR_V1_2 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_2', + 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f', + 'instanceid': '42872224', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address':'45.76.7.171', + 'gateway':'45.76.6.1', + 'netmask':'255.255.254.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'address':'2001:19f0:5:28a7:5400:03ff:fe1b:4eca', + 'network':'2001:19f0:5:28a7::', + 'prefix':'64' + }, + 'mac':'56:00:03:1b:4e:ca', + 'network-type':'public' + }, + { + 'ipv4': { + 'additional': [ + ], + 'address':'10.1.112.3', + 'gateway':'', + 'netmask':'255.255.240.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'network':'', + 'prefix':'' + }, + 'mac':'5a:00:03:1b:4e:ca', + 'network-type':'private', + 'network-v2-id':'fbbe2b5b-b986-4396-87f5-7246660ccb64', + 'networkid':'net5e7155329d730' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'user-data': [ + ], + + 'vendor-data': { + 'vendor-script': '', + 'ethtool-script': '', + 'raid1-script': '', + 'config': { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + } +} + +SSH_KEYS_1 = [ + "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key" +] + +# Expected generated objects + +# Expected config +EXPECTED_VULTR_CONFIG = { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } +} + +# Expected network config object from generator 
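+# (The expected objects below mirror what generate_network_config emits:
+# a nameserver entry first, then one physical-interface entry per
+# metadata interface -- DHCP subnets for the public NIC and a static
+# subnet for the private one.)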
+EXPECTED_VULTR_NETWORK_1 = { + 'version': 1, + 'config': [ + { + 'type': 'nameserver', + 'address': ['108.61.10.10'] + }, + { + 'name': 'eth0', + 'type': 'physical', + 'mac_address': '56:00:03:15:c4:65', + 'accept-ra': 1, + 'subnets': [ + {'type': 'dhcp', 'control': 'auto'}, + {'type': 'dhcp6', 'control': 'auto'} + ], + } + ] +} + +EXPECTED_VULTR_NETWORK_2 = { + 'version': 1, + 'config': [ + { + 'type': 'nameserver', + 'address': ['108.61.10.10'] + }, + { + 'name': 'eth0', + 'type': 'physical', + 'mac_address': '56:00:03:1b:4e:ca', + 'accept-ra': 1, + 'subnets': [ + {'type': 'dhcp', 'control': 'auto'}, + {'type': 'dhcp6', 'control': 'auto'} + ], + }, + { + 'name': 'eth1', + 'type': 'physical', + 'mac_address': '5a:00:03:1b:4e:ca', + 'accept-ra': 1, + 'subnets': [ + { + "type": "static", + "control": "auto", + "address": "10.1.112.3", + "netmask": "255.255.240.0" + } + ], + } + ] +} + + +INTERFACE_MAP = { + '56:00:03:15:c4:65': 'eth0', + '56:00:03:1b:4e:ca': 'eth0', + '5a:00:03:1b:4e:ca': 'eth1' +} + + +class TestDataSourceVultr(CiTestCase): + def setUp(self): + super(TestDataSourceVultr, self).setUp() + + # Stored as a dict to make it easier to maintain + raw1 = json.dumps(VULTR_V1_1['vendor-data']['config']) + raw2 = json.dumps(VULTR_V1_2['vendor-data']['config']) + + # Make expected format + VULTR_V1_1['vendor-data']['config'] = raw1 + VULTR_V1_2['vendor-data']['config'] = raw2 + + self.tmp = self.tmp_dir() + + # Test the datasource itself + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.sources.helpers.vultr.is_vultr') + @mock.patch('cloudinit.sources.helpers.vultr.get_metadata') + def test_datasource(self, + mock_getmeta, + mock_isvultr, + mock_netmap): + mock_getmeta.return_value = VULTR_V1_2 + mock_isvultr.return_value = True + mock_netmap.return_value = INTERFACE_MAP + + source = DataSourceVultr.DataSourceVultr( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + + # Test for failure + self.assertEqual(True, source._get_data()) + + # Test instance id + self.assertEqual("42872224", source.metadata['instanceid']) + + # Test hostname + self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname']) + + # Test ssh keys + self.assertEqual(SSH_KEYS_1, source.metadata['public-keys']) + + # Test vendor data generation + orig_val = self.maxDiff + self.maxDiff = None + + vendordata = source.vendordata_raw + + # Test vendor config + self.assertEqual( + EXPECTED_VULTR_CONFIG, + json.loads(vendordata[0].replace("#cloud-config", ""))) + + self.maxDiff = orig_val + + # Test network config generation + self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) + + # Test network config generation + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_network_config(self, mock_netmap): + mock_netmap.return_value = INTERFACE_MAP + interf = VULTR_V1_1['interfaces'] + + self.assertEqual(EXPECTED_VULTR_NETWORK_1, + vultr.generate_network_config(interf)) + + # Test Private Networking config generation + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_private_network_config(self, mock_netmap): + mock_netmap.return_value = INTERFACE_MAP + interf = VULTR_V1_2['interfaces'] + + self.assertEqual(EXPECTED_VULTR_NETWORK_2, + vultr.generate_network_config(interf)) + +# vi: ts=4 expandtab diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index b39f4198..d6212d1d 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -11,6 +11,7 @@ BirknerAlex candlerb cawamata dankenigsberg +ddymko dermotbradley 
dhensby eandersson diff --git a/tools/ds-identify b/tools/ds-identify index 2f2486f7..73e27c71 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # this has to match the builtin list in cloud-init, it is what will # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ -CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ +CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \ OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud" DI_DSLIST="" DI_MODE="" @@ -1350,6 +1350,20 @@ dscheck_IBMCloud() { return ${DS_NOT_FOUND} } +dscheck_Vultr() { + dmi_sys_vendor_is Vultr && return $DS_FOUND + + case " $DI_KERNEL_CMDLINE " in + *\ vultr\ *) return $DS_FOUND ;; + esac + + if [ -f "${PATH_ROOT}/etc/vultr" ]; then + return $DS_FOUND + fi + + return $DS_NOT_FOUND +} + collect_info() { read_uname_info read_virt -- cgit v1.2.3 From 9f9e154ff54cc37e7d0831c431a9d1279832fc69 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Tue, 13 Apr 2021 17:39:39 -0400 Subject: azure: Removing ability to invoke walinuxagent (#799) Invoking walinuxagent from within cloud-init is no longer supported/necessary --- cloudinit/sources/DataSourceAzure.py | 137 +++++--------------------- doc/rtd/topics/datasources/azure.rst | 62 ++---------- tests/unittests/test_datasource/test_azure.py | 97 ------------------ 3 files changed, 35 insertions(+), 261 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6cae9e82..8c7fb021 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -381,53 +381,6 @@ class DataSourceAzure(sources.DataSource): util.logexc(LOG, "handling set_hostname failed") return False - @azure_ds_telemetry_reporter - def get_metadata_from_agent(self): - temp_hostname = self.metadata.get('local-hostname') - agent_cmd = self.ds_cfg['agent_command'] - LOG.debug("Getting metadata via agent. 
hostname=%s cmd=%s", - temp_hostname, agent_cmd) - - self.bounce_network_with_azure_hostname() - - try: - invoke_agent(agent_cmd) - except subp.ProcessExecutionError: - # claim the datasource even if the command failed - util.logexc(LOG, "agent command '%s' failed.", - self.ds_cfg['agent_command']) - - ddir = self.ds_cfg['data_dir'] - - fp_files = [] - key_value = None - for pk in self.cfg.get('_pubkeys', []): - if pk.get('value', None): - key_value = pk['value'] - LOG.debug("SSH authentication: using value from fabric") - else: - bname = str(pk['fingerprint'] + ".crt") - fp_files += [os.path.join(ddir, bname)] - LOG.debug("SSH authentication: " - "using fingerprint from fabric") - - with events.ReportEventStack( - name="waiting-for-ssh-public-key", - description="wait for agents to retrieve SSH keys", - parent=azure_ds_reporter): - # wait very long for public SSH keys to arrive - # https://bugs.launchpad.net/cloud-init/+bug/1717611 - missing = util.log_time(logfunc=LOG.debug, - msg="waiting for SSH public key files", - func=util.wait_for_files, - args=(fp_files, 900)) - if len(missing): - LOG.warning("Did not find files, but going on: %s", missing) - - metadata = {} - metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) - return metadata - def _get_subplatform(self): """Return the subplatform metadata source details.""" if self.seed.startswith('/dev'): @@ -1354,35 +1307,32 @@ class DataSourceAzure(sources.DataSource): On failure, returns False. """ - if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: - self.bounce_network_with_azure_hostname() + self.bounce_network_with_azure_hostname() - pubkey_info = None - try: - raise KeyError( - "Not using public SSH keys from IMDS" - ) - # pylint:disable=unreachable - public_keys = self.metadata['imds']['compute']['publicKeys'] - LOG.debug( - 'Successfully retrieved %s key(s) from IMDS', - len(public_keys) - if public_keys is not None - else 0 - ) - except KeyError: - LOG.debug( - 'Unable to retrieve SSH keys from IMDS during ' - 'negotiation, falling back to OVF' - ) - pubkey_info = self.cfg.get('_pubkeys', None) - - metadata_func = partial(get_metadata_from_fabric, - fallback_lease_file=self. - dhclient_lease_file, - pubkey_info=pubkey_info) - else: - metadata_func = self.get_metadata_from_agent + pubkey_info = None + try: + raise KeyError( + "Not using public SSH keys from IMDS" + ) + # pylint:disable=unreachable + public_keys = self.metadata['imds']['compute']['publicKeys'] + LOG.debug( + 'Successfully retrieved %s key(s) from IMDS', + len(public_keys) + if public_keys is not None + else 0 + ) + except KeyError: + LOG.debug( + 'Unable to retrieve SSH keys from IMDS during ' + 'negotiation, falling back to OVF' + ) + pubkey_info = self.cfg.get('_pubkeys', None) + + metadata_func = partial(get_metadata_from_fabric, + fallback_lease_file=self. 
+ dhclient_lease_file, + pubkey_info=pubkey_info) LOG.debug("negotiating with fabric via agent command %s", self.ds_cfg['agent_command']) @@ -1617,33 +1567,6 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname): return True -@azure_ds_telemetry_reporter -def crtfile_to_pubkey(fname, data=None): - pipeline = ('openssl x509 -noout -pubkey < "$0" |' - 'ssh-keygen -i -m PKCS8 -f /dev/stdin') - (out, _err) = subp.subp(['sh', '-c', pipeline, fname], - capture=True, data=data) - return out.rstrip() - - -@azure_ds_telemetry_reporter -def pubkeys_from_crt_files(flist): - pubkeys = [] - errors = [] - for fname in flist: - try: - pubkeys.append(crtfile_to_pubkey(fname)) - except subp.ProcessExecutionError: - errors.append(fname) - - if errors: - report_diagnostic_event( - "failed to convert the crt files to pubkey: %s" % errors, - logger_func=LOG.warning) - - return pubkeys - - @azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): @@ -1672,16 +1595,6 @@ def write_files(datadir, files, dirmode=None): util.write_file(filename=fname, content=content, mode=0o600) -@azure_ds_telemetry_reporter -def invoke_agent(cmd): - # this is a function itself to simplify patching it for test - if cmd: - LOG.debug("invoking agent: %s", cmd) - subp.subp(cmd, shell=(not isinstance(cmd, list))) - else: - LOG.debug("not invoking agent") - - def find_child(node, filter_func): ret = [] if not node.hasChildNodes(): diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index e04c3a33..ad9f2236 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -5,28 +5,6 @@ Azure This datasource finds metadata and user-data from the Azure cloud platform. -walinuxagent ------------- -walinuxagent has several functions within images. For cloud-init -specifically, the relevant functionality it performs is to register the -instance with the Azure cloud platform at boot so networking will be -permitted. For more information about the other functionality of -walinuxagent, see `Azure's documentation -`_ for more details. -(Note, however, that only one of walinuxagent's provisioning and cloud-init -should be used to perform instance customisation.) - -If you are configuring walinuxagent yourself, you will want to ensure that you -have `Provisioning.UseCloudInit -`_ set to -``y``. - - -Builtin Agent -------------- -An alternative to using walinuxagent to register to the Azure cloud platform -is to use the ``__builtin__`` agent command. This section contains more -background on what that code path does, and how to enable it. The Azure cloud platform provides initial data to an instance via an attached CD formatted in UDF. That CD contains a 'ovf-env.xml' file that provides some @@ -41,16 +19,6 @@ by calling a script in /etc/dhcp/dhclient-exit-hooks or a file in 'dhclient_hook' of cloud-init itself. This sub-command will write the client information in json format to /run/cloud-init/dhclient.hook/.json. -In order for cloud-init to leverage this method to find the endpoint, the -cloud.cfg file must contain: - -.. sourcecode:: yaml - - datasource: - Azure: - set_hostname: False - agent_command: __builtin__ - If those files are not available, the fallback is to check the leases file for the endpoint server (again option 245). @@ -83,9 +51,6 @@ configuration (in ``/etc/cloud/cloud.cfg`` or ``/etc/cloud/cloud.cfg.d/``). The settings that may be configured are: - * **agent_command**: Either __builtin__ (default) or a command to run to getcw - metadata. 
If __builtin__, get metadata from walinuxagent. Otherwise run the - provided command to obtain metadata. * **apply_network_config**: Boolean set to True to use network configuration described by Azure's IMDS endpoint instead of fallback network config of dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is @@ -121,7 +86,6 @@ An example configuration with the default values is provided below: datasource: Azure: - agent_command: __builtin__ apply_network_config: true data_dir: /var/lib/waagent dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases @@ -144,9 +108,7 @@ child of the ``LinuxProvisioningConfigurationSet`` (a sibling to ``UserName``) If both ``UserData`` and ``CustomData`` are provided behavior is undefined on which will be selected. -In the example below, user-data provided is 'this is my userdata', and the -datasource config provided is ``{"agent_command": ["start", "walinuxagent"]}``. -That agent command will take affect as if it were specified in system config. +In the example below, user-data provided is 'this is my userdata' Example: @@ -184,20 +146,16 @@ The hostname is provided to the instance in the ovf-env.xml file as Whatever value the instance provides in its dhcp request will resolve in the domain returned in the 'search' request. -The interesting issue is that a generic image will already have a hostname -configured. The ubuntu cloud images have 'ubuntu' as the hostname of the -system, and the initial dhcp request on eth0 is not guaranteed to occur after -the datasource code has been run. So, on first boot, that initial value will -be sent in the dhcp request and *that* value will resolve. - -In order to make the ``HostName`` provided in the ovf-env.xml resolve, a -dhcp request must be made with the new value. Walinuxagent (in its current -version) handles this by polling the state of hostname and bouncing ('``ifdown -eth0; ifup eth0``' the network interface if it sees that a change has been -made. +A generic image will already have a hostname configured. The ubuntu +cloud images have 'ubuntu' as the hostname of the system, and the +initial dhcp request on eth0 is not guaranteed to occur after the +datasource code has been run. So, on first boot, that initial value +will be sent in the dhcp request and *that* value will resolve. -cloud-init handles this by setting the hostname in the DataSource's 'get_data' -method via '``hostname $HostName``', and then bouncing the interface. This +In order to make the ``HostName`` provided in the ovf-env.xml resolve, +a dhcp request must be made with the new value. cloud-init handles +this by setting the hostname in the DataSource's 'get_data' method via +'``hostname $HostName``', and then bouncing the interface. This behavior can be configured or disabled in the datasource config. See 'Configuration' above. 
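A minimal sketch of the set-then-bounce flow described above (illustrative
only, not cloud-init's actual implementation; the helper name and the
Debian-style ifdown/ifup pair are assumptions):

    import subprocess

    def set_hostname_and_bounce(hostname: str, iface: str = "eth0") -> None:
        # Apply the new hostname first so the renewed DHCP request
        # advertises it to the platform.
        subprocess.run(["hostname", hostname], check=True)
        # Bounce the interface; the fresh DHCP exchange registers the
        # new name in the cloud's DNS.
        subprocess.run(
            ["sh", "-c", "ifdown %s; ifup %s" % (iface, iface)],
            check=True,
        )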
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index dedebeb1..320fa857 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -638,17 +638,10 @@ scbus-1 on xpt0 bus 0 def dsdevs(): return data.get('dsdevs', []) - def _invoke_agent(cmd): - data['agent_invoked'] = cmd - def _wait_for_files(flist, _maxwait=None, _naplen=None): data['waited'] = flist return [] - def _pubkeys_from_crt_files(flist): - data['pubkey_files'] = flist - return ["pubkey_from: %s" % f for f in flist] - if data.get('ovfcontent') is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) @@ -675,8 +668,6 @@ scbus-1 on xpt0 bus 0 self.apply_patches([ (dsaz, 'list_possible_azure_ds_devs', dsdevs), - (dsaz, 'invoke_agent', _invoke_agent), - (dsaz, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), (dsaz, 'perform_hostname_bounce', mock.MagicMock()), (dsaz, 'get_hostname', mock.MagicMock()), (dsaz, 'set_hostname', mock.MagicMock()), @@ -765,7 +756,6 @@ scbus-1 on xpt0 bus 0 ret = dsrc.get_data() self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) self.assertFalse(ret) - self.assertNotIn('agent_invoked', data) # Assert that for non viable platforms, # there is no communication with the Azure datasource. self.assertEqual( @@ -789,7 +779,6 @@ scbus-1 on xpt0 bus 0 ret = dsrc.get_data() self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) self.assertFalse(ret) - self.assertNotIn('agent_invoked', data) self.assertEqual( 1, m_report_failure.call_count) @@ -806,7 +795,6 @@ scbus-1 on xpt0 bus 0 1, m_crawl_metadata.call_count) self.assertFalse(ret) - self.assertNotIn('agent_invoked', data) def test_crawl_metadata_exception_should_report_failure_with_msg(self): data = {} @@ -1086,21 +1074,6 @@ scbus-1 on xpt0 bus 0 self.assertTrue(os.path.isdir(self.waagent_d)) self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700) - def test_user_cfg_set_agent_command_plain(self): - # set dscfg in via plaintext - # we must have friendly-to-xml formatted plaintext in yaml_cfg - # not all plaintext is expected to work. 
- yaml_cfg = "{agent_command: my_command}\n" - cfg = yaml.safe_load(yaml_cfg) - odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(data['agent_invoked'], cfg['agent_command']) - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', return_value=None) def test_network_config_set_from_imds(self, m_driver): @@ -1205,29 +1178,6 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertEqual('eastus2', dsrc.region) - def test_user_cfg_set_agent_command(self): - # set dscfg in via base64 encoded yaml - cfg = {'agent_command': "my_command"} - odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': b64e(yaml.dump(cfg)), - 'encoding': 'base64'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(data['agent_invoked'], cfg['agent_command']) - - def test_sys_cfg_set_agent_command(self): - sys_cfg = {'datasource': {'Azure': {'agent_command': '_COMMAND'}}} - data = {'ovfcontent': construct_valid_ovf_env(data={}), - 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(data['agent_invoked'], '_COMMAND') - def test_sys_cfg_set_never_destroy_ntfs(self): sys_cfg = {'datasource': {'Azure': { 'never_destroy_ntfs': 'user-supplied-value'}}} @@ -1311,51 +1261,6 @@ scbus-1 on xpt0 bus 0 self.assertTrue(ret) self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8')) - def test_cfg_has_pubkeys_fingerprint(self): - odata = {'HostName': "myhost", 'UserName': "myuser"} - mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] - pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] - data = {'ovfcontent': construct_valid_ovf_env(data=odata, - pubkeys=pubkeys)} - - dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - for mypk in mypklist: - self.assertIn(mypk, dsrc.cfg['_pubkeys']) - self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1]) - - def test_cfg_has_pubkeys_value(self): - # make sure that provided key is used over fingerprint - odata = {'HostName': "myhost", 'UserName': "myuser"} - mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}] - pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] - data = {'ovfcontent': construct_valid_ovf_env(data=odata, - pubkeys=pubkeys)} - - dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - - for mypk in mypklist: - self.assertIn(mypk, dsrc.cfg['_pubkeys']) - self.assertIn(mypk['value'], dsrc.metadata['public-keys']) - - def test_cfg_has_no_fingerprint_has_value(self): - # test value is used when fingerprint not provided - odata = {'HostName': "myhost", 'UserName': "myuser"} - mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}] - pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] - data = {'ovfcontent': construct_valid_ovf_env(data=odata, - pubkeys=pubkeys)} - - dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - - for mypk in mypklist: - self.assertIn(mypk['value'], dsrc.metadata['public-keys']) - def 
test_default_ephemeral_configs_ephemeral_exists(self): # make sure the ephemeral configs are correct if disk present odata = {} @@ -1919,8 +1824,6 @@ class TestAzureBounce(CiTestCase): with_logs = True def mock_out_azure_moving_parts(self): - self.patches.enter_context( - mock.patch.object(dsaz, 'invoke_agent')) self.patches.enter_context( mock.patch.object(dsaz.util, 'wait_for_files')) self.patches.enter_context( -- cgit v1.2.3 From cc16c9224681c9a60c2be5c52cff45aa17a8c010 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 14 Apr 2021 14:35:32 -0500 Subject: doc: Replace remaining references to user-scripts as a config module (#866) git-grep showed a few more locations where we refer to a "user-scripts" config module which is really cc_scripts_user module. Replace these references with slightly different language so as not to confuse future me when looking for "user-scripts" vs. "scripts-user" --- cloudinit/config/cc_rightscale_userdata.py | 2 +- doc/rtd/topics/boot.rst | 2 +- doc/rtd/topics/vendordata.rst | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index a5aca038..c75dc57d 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -44,7 +44,7 @@ user scripts configuration directory, to be run later by ``cc_scripts_user``. # - read the blob of data from raw user data, and parse it as key/value # - for each key that is found, download the content to # the local instance/scripts directory and set them executable. -# - the files in that directory will be run by the user-scripts module +# - the files in that directory will be run by the scripts-user module # Therefore, this must run before that. # # diff --git a/doc/rtd/topics/boot.rst b/doc/rtd/topics/boot.rst index a5282e35..f7dfcd3a 100644 --- a/doc/rtd/topics/boot.rst +++ b/doc/rtd/topics/boot.rst @@ -150,7 +150,7 @@ Things that run here include * package installations * configuration management plugins (puppet, chef, salt-minion) - * user-scripts (i.e. shell scripts passed as user-data) + * user-defined scripts (i.e. shell scripts passed as user-data) For scripts external to cloud-init looking to wait until cloud-init is finished, the ``cloud-init status`` subcommand can help block external diff --git a/doc/rtd/topics/vendordata.rst b/doc/rtd/topics/vendordata.rst index cdb552d0..87a899b3 100644 --- a/doc/rtd/topics/vendordata.rst +++ b/doc/rtd/topics/vendordata.rst @@ -47,8 +47,8 @@ way as user-data. The only differences are: - * user-scripts are stored in a different location than user-scripts (to - avoid namespace collision) + * vendor-data-defined scripts are stored in a different location than + user-data-defined scripts (to avoid namespace collision) * user can disable part handlers by cloud-config settings. For example, to disable handling of 'part-handlers' in vendor-data, the user could provide user-data like this: -- cgit v1.2.3 From 45db197cfc7e3488baae7dc1053c45da070248f6 Mon Sep 17 00:00:00 2001 From: hamalq <81582959+hamalq@users.noreply.github.com> Date: Thu, 15 Apr 2021 16:45:12 -0700 Subject: add prefer_fqdn_over_hostname config option (#859) the above option allows the user to control the behavior of a distro hostname selection if both short hostname and FQDN are supplied. 
If `prefer_fqdn_over_hostname` is true the FQDN will be selected as hostname; if false the hostname will be selected LP: #1921004 --- cloudinit/config/cc_set_hostname.py | 14 ++++- cloudinit/config/cc_update_hostname.py | 8 +++ cloudinit/distros/__init__.py | 7 +++ cloudinit/distros/freebsd.py | 7 +-- cloudinit/distros/rhel.py | 11 ++-- .../integration_tests/modules/test_set_hostname.py | 17 ++++++ .../test_handler/test_handler_set_hostname.py | 69 +++++++++++++++++----- 7 files changed, 105 insertions(+), 28 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index d4017478..5a59dc32 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -19,7 +19,10 @@ A hostname and fqdn can be provided by specifying a full domain name under the key, and the fqdn of the cloud wil be used. If a fqdn specified with the ``hostname`` key, it will be handled properly, although it is better to use the ``fqdn`` config key. If both ``fqdn`` and ``hostname`` are set, -it is distro dependent whether ``hostname`` or ``fqdn`` is used. +it is distro dependent whether ``hostname`` or ``fqdn`` is used, +unless the ``prefer_fqdn_over_hostname`` option is true and fqdn is set +it will force the use of FQDN in all distros, and if false then it will +force the hostname use. This module will run in the init-local stage before networking is configured if the hostname is set by metadata or user data on the local system. @@ -38,6 +41,7 @@ based on initial hostname. **Config keys**:: preserve_hostname: + prefer_fqdn_over_hostname: fqdn: hostname: """ @@ -62,6 +66,14 @@ def handle(name, cfg, cloud, log, _args): log.debug(("Configuration option 'preserve_hostname' is set," " not setting the hostname in module %s"), name) return + + # Set prefer_fqdn_over_hostname value in distro + hostname_fqdn = util.get_cfg_option_bool(cfg, + "prefer_fqdn_over_hostname", + None) + if hostname_fqdn is not None: + cloud.distro.set_option('prefer_fqdn_over_hostname', hostname_fqdn) + (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) # Check for previous successful invocation of set-hostname diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index d5f4eb5a..f4120356 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -27,6 +27,7 @@ is set, then the hostname will not be altered. 
**Config keys**:: preserve_hostname: + prefer_fqdn_over_hostname: fqdn: hostname: """ @@ -45,6 +46,13 @@ def handle(name, cfg, cloud, log, _args): " not updating the hostname in module %s"), name) return + # Set prefer_fqdn_over_hostname value in distro + hostname_fqdn = util.get_cfg_option_bool(cfg, + "prefer_fqdn_over_hostname", + None) + if hostname_fqdn is not None: + cloud.distro.set_option('prefer_fqdn_over_hostname', hostname_fqdn) + (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) try: prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname") diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 220bd11f..8b8a647d 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -79,6 +79,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): shutdown_options_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'} _ci_pkl_version = 1 + prefer_fqdn = False def __init__(self, name, cfg, paths): self._paths = paths @@ -131,6 +132,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): def get_option(self, opt_name, default=None): return self._cfg.get(opt_name, default) + def set_option(self, opt_name, value=None): + self._cfg[opt_name] = value + def set_hostname(self, hostname, fqdn=None): writeable_hostname = self._select_hostname(hostname, fqdn) self._write_hostname(writeable_hostname, self.hostname_conf_fn) @@ -259,6 +263,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): def _select_hostname(self, hostname, fqdn): # Prefer the short hostname over the long # fully qualified domain name + if util.get_cfg_option_bool(self._cfg, "prefer_fqdn_over_hostname", + self.prefer_fqdn) and fqdn: + return fqdn if not hostname: return fqdn return hostname diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index dde34d41..9659843f 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -27,12 +27,7 @@ class Distro(cloudinit.distros.bsd.BSD): pkg_cmd_remove_prefix = ["pkg", "remove"] pkg_cmd_update_prefix = ["pkg", "update"] pkg_cmd_upgrade_prefix = ["pkg", "upgrade"] - - def _select_hostname(self, hostname, fqdn): - # Should be FQDN if available. 
See rc.conf(5) in FreeBSD - if fqdn: - return fqdn - return hostname + prefer_fqdn = True # See rc.conf(5) in FreeBSD def _get_add_member_to_group_cmd(self, member_name, group_name): return ['pw', 'usermod', '-n', member_name, '-G', group_name] diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index c72f7c17..0c00a531 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -50,6 +50,10 @@ class Distro(distros.Distro): } } + # Should be fqdn if we can use it + # See: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/5/html/deployment_guide/ch-sysconfig # noqa: E501 + prefer_fqdn = True + def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) # This will be used to restrict certain @@ -91,13 +95,6 @@ class Distro(distros.Distro): } rhel_util.update_sysconfig_file(out_fn, host_cfg) - def _select_hostname(self, hostname, fqdn): - # Should be fqdn if we can use it - # See: https://www.centos.org/docs/5/html/Deployment_Guide-en-US/ch-sysconfig.html#s2-sysconfig-network # noqa - if fqdn: - return fqdn - return hostname - def _read_system_hostname(self): if self.uses_systemd(): host_fn = self.systemd_hostname_conf_fn diff --git a/tests/integration_tests/modules/test_set_hostname.py b/tests/integration_tests/modules/test_set_hostname.py index 2bfa403d..e7f7f6b6 100644 --- a/tests/integration_tests/modules/test_set_hostname.py +++ b/tests/integration_tests/modules/test_set_hostname.py @@ -24,6 +24,13 @@ hostname: cloudinit1 fqdn: cloudinit2.i9n.cloud-init.io """ +USER_DATA_PREFER_FQDN = """\ +#cloud-config +prefer_fqdn_over_hostname: {} +hostname: cloudinit1 +fqdn: cloudinit2.test.io +""" + @pytest.mark.ci class TestHostname: @@ -33,6 +40,16 @@ class TestHostname: hostname_output = client.execute("hostname") assert "cloudinit2" in hostname_output.strip() + @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(True)) + def test_prefer_fqdn(self, client): + hostname_output = client.execute("hostname") + assert "cloudinit2.test.io" in hostname_output.strip() + + @pytest.mark.user_data(USER_DATA_PREFER_FQDN.format(False)) + def test_prefer_short_hostname(self, client): + hostname_output = client.execute("hostname") + assert "cloudinit1" in hostname_output.strip() + @pytest.mark.user_data(USER_DATA_FQDN) def test_hostname_and_fqdn(self, client): hostname_output = client.execute("hostname") diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index 58abf51a..73641b70 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -15,6 +15,7 @@ import os import shutil import tempfile from io import BytesIO +from unittest import mock LOG = logging.getLogger(__name__) @@ -29,14 +30,53 @@ class TestHostname(t_help.FilesystemMockingTestCase): util.ensure_dir(os.path.join(self.tmp, 'data')) self.addCleanup(shutil.rmtree, self.tmp) - def _fetch_distro(self, kind): + def _fetch_distro(self, kind, conf=None): cls = distros.fetch(kind) paths = helpers.Paths({'cloud_dir': self.tmp}) - return cls(kind, {}, paths) + conf = {} if conf is None else conf + return cls(kind, conf, paths) - def test_write_hostname_rhel(self): + def test_debian_write_hostname_prefer_fqdn(self): cfg = { - 'hostname': 'blah.blah.blah.yahoo.com', + 'hostname': 'blah', + 'prefer_fqdn_over_hostname': True, + 'fqdn': 'blah.yahoo.com', + } + distro = self._fetch_distro('debian', cfg) + paths = 
helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('blah.yahoo.com', contents.strip()) + + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_rhel_write_hostname_prefer_hostname(self, m_uses_systemd): + cfg = { + 'hostname': 'blah', + 'prefer_fqdn_over_hostname': False, + 'fqdn': 'blah.yahoo.com', + } + distro = self._fetch_distro('rhel', cfg) + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/sysconfig/network", decode=False) + n_cfg = ConfigObj(BytesIO(contents)) + self.assertEqual( + {'HOSTNAME': 'blah'}, + dict(n_cfg)) + + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_write_hostname_rhel(self, m_uses_systemd): + cfg = { + 'hostname': 'blah', + 'fqdn': 'blah.blah.blah.yahoo.com' } distro = self._fetch_distro('rhel') paths = helpers.Paths({'cloud_dir': self.tmp}) @@ -45,15 +85,16 @@ class TestHostname(t_help.FilesystemMockingTestCase): self.patchUtils(self.tmp) cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) - if not distro.uses_systemd(): - contents = util.load_file("/etc/sysconfig/network", decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual({'HOSTNAME': 'blah.blah.blah.yahoo.com'}, - dict(n_cfg)) + contents = util.load_file("/etc/sysconfig/network", decode=False) + n_cfg = ConfigObj(BytesIO(contents)) + self.assertEqual( + {'HOSTNAME': 'blah.blah.blah.yahoo.com'}, + dict(n_cfg)) def test_write_hostname_debian(self): cfg = { - 'hostname': 'blah.blah.blah.yahoo.com', + 'hostname': 'blah', + 'fqdn': 'blah.blah.blah.yahoo.com', } distro = self._fetch_distro('debian') paths = helpers.Paths({'cloud_dir': self.tmp}) @@ -65,7 +106,8 @@ class TestHostname(t_help.FilesystemMockingTestCase): contents = util.load_file("/etc/hostname") self.assertEqual('blah', contents.strip()) - def test_write_hostname_sles(self): + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_write_hostname_sles(self, m_uses_systemd): cfg = { 'hostname': 'blah.blah.blah.suse.com', } @@ -75,9 +117,8 @@ class TestHostname(t_help.FilesystemMockingTestCase): cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) - if not distro.uses_systemd(): - contents = util.load_file(distro.hostname_conf_fn) - self.assertEqual('blah', contents.strip()) + contents = util.load_file(distro.hostname_conf_fn) + self.assertEqual('blah', contents.strip()) def test_multiple_calls_skips_unchanged_hostname(self): """Only new hostname or fqdn values will generate a hostname call.""" -- cgit v1.2.3 From d132356cc361abef2d90d4073438f3ab759d5964 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 19 Apr 2021 11:31:28 -0500 Subject: fix error on upgrade caused by new vendordata2 attributes (#869) In #777, we added 'vendordata2' and 'vendordata2_raw' attributes to the DataSource class, but didn't use the upgrade framework to deal with an unpickle after upgrade. This commit adds the necessary upgrade code. Additionally, added a smaller-scope upgrade test to our integration tests that will be run on every CI run so we catch these issues immediately in the future. 
LP: #1922739 --- cloudinit/sources/__init__.py | 12 +++++++++++- cloudinit/tests/test_upgrade.py | 4 ++++ tests/integration_tests/clouds.py | 4 ++-- tests/integration_tests/test_upgrade.py | 25 ++++++++++++++++++++++++- 4 files changed, 41 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 1ad1880d..7d74f8d9 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -24,6 +24,7 @@ from cloudinit import util from cloudinit.atomic_helper import write_json from cloudinit.event import EventType from cloudinit.filters import launch_index +from cloudinit.persistence import CloudInitPickleMixin from cloudinit.reporting import events DSMODE_DISABLED = "disabled" @@ -134,7 +135,7 @@ URLParams = namedtuple( 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) -class DataSource(metaclass=abc.ABCMeta): +class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): dsmode = DSMODE_NETWORK default_locale = 'en_US.UTF-8' @@ -196,6 +197,8 @@ class DataSource(metaclass=abc.ABCMeta): # non-root users sensitive_metadata_keys = ('merged_cfg', 'security-credentials',) + _ci_pkl_version = 1 + def __init__(self, sys_cfg, distro, paths, ud_proc=None): self.sys_cfg = sys_cfg self.distro = distro @@ -218,6 +221,13 @@ class DataSource(metaclass=abc.ABCMeta): else: self.ud_proc = ud_proc + def _unpickle(self, ci_pkl_version: int) -> None: + """Perform deserialization fixes for DataSource.""" + if not hasattr(self, 'vendordata2'): + self.vendordata2 = None + if not hasattr(self, 'vendordata2_raw'): + self.vendordata2_raw = None + def __str__(self): return type_utils.obj_name(self) diff --git a/cloudinit/tests/test_upgrade.py b/cloudinit/tests/test_upgrade.py index 05eefd2a..da3ab23b 100644 --- a/cloudinit/tests/test_upgrade.py +++ b/cloudinit/tests/test_upgrade.py @@ -46,3 +46,7 @@ class TestUpgrade: def test_paths_has_run_dir_attribute(self, previous_obj_pkl): assert previous_obj_pkl.paths.run_dir is not None + + def test_vendordata_exists(self, previous_obj_pkl): + assert previous_obj_pkl.vendordata2 is None + assert previous_obj_pkl.vendordata2_raw is None diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index 11b57407..3bbccb44 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -111,14 +111,14 @@ class IntegrationCloud(ABC): # Even if we're using the default key, it may still have a # different name in the clouds, so we need to set it separately.
self.cloud_instance.key_pair.name = settings.KEYPAIR_NAME - self._released_image_id = self._get_initial_image() + self.released_image_id = self._get_initial_image() self.snapshot_id = None @property def image_id(self): if self.snapshot_id: return self.snapshot_id - return self._released_image_id + return self.released_image_id def emit_settings_to_log(self) -> None: log.info( diff --git a/tests/integration_tests/test_upgrade.py b/tests/integration_tests/test_upgrade.py index c20cb3c1..48e0691b 100644 --- a/tests/integration_tests/test_upgrade.py +++ b/tests/integration_tests/test_upgrade.py @@ -1,4 +1,5 @@ import logging +import os import pytest import time from pathlib import Path @@ -8,6 +9,8 @@ from tests.integration_tests.conftest import ( get_validated_source, session_start_time, ) +from tests.integration_tests.instances import CloudInitSource + log = logging.getLogger('integration_testing') @@ -63,7 +66,7 @@ def test_upgrade(session_cloud: IntegrationCloud): return # type checking doesn't understand that skip raises launch_kwargs = { - 'image_id': session_cloud._get_initial_image(), + 'image_id': session_cloud.released_image_id, } image = ImageSpecification.from_os_image() @@ -93,6 +96,26 @@ def test_upgrade(session_cloud: IntegrationCloud): instance.install_new_cloud_init(source, take_snapshot=False) instance.execute('hostname something-else') _restart(instance) + assert instance.execute('cloud-init status --wait --long').ok _output_to_compare(instance, after_path, netcfg_path) log.info('Wrote upgrade test logs to %s and %s', before_path, after_path) + + +@pytest.mark.ci +@pytest.mark.ubuntu +def test_upgrade_package(session_cloud: IntegrationCloud): + if get_validated_source(session_cloud) != CloudInitSource.DEB_PACKAGE: + not_run_message = 'Test only supports upgrading to build deb' + if os.environ.get('TRAVIS'): + # If this isn't running on CI, we should know + pytest.fail(not_run_message) + else: + pytest.skip(not_run_message) + + launch_kwargs = {'image_id': session_cloud.released_image_id} + + with session_cloud.launch(launch_kwargs=launch_kwargs) as instance: + instance.install_deb() + instance.restart() + assert instance.execute('cloud-init status --wait --long').ok -- cgit v1.2.3 From ced836e69274af905bbc1e5f5fde71de4066c86c Mon Sep 17 00:00:00 2001 From: Nicolas Bock Date: Thu, 22 Apr 2021 12:22:54 -0600 Subject: Use `partprobe` to re-read partition table if available (#856) The blkdev command is fragile when re-reading partition tables if a partition is mounted. This change instead uses partprobe, if it is available. LP: #1920939 --- cloudinit/config/cc_disk_setup.py | 13 +++++++++---- tools/.github-cla-signers | 1 + 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index d1200694..a582924b 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -113,6 +113,7 @@ SGDISK_CMD = subp.which("sgdisk") LSBLK_CMD = subp.which("lsblk") BLKID_CMD = subp.which("blkid") BLKDEV_CMD = subp.which("blockdev") +PARTPROBE_CMD = subp.which("partprobe") WIPEFS_CMD = subp.which("wipefs") LANG_C_ENV = {'LANG': 'C'} @@ -685,13 +686,17 @@ def get_partition_layout(table_type, size, layout): def read_parttbl(device): """ - Use partprobe instead of 'udevadm'. Partprobe is the only - reliable way to probe the partition table. + Use `partprobe` or `blkdev` instead of `udevadm`.
`Partprobe` is + preferred over `blkdev` since it is more reliably able to probe + the partition table. """ - blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device] + if PARTPROBE_CMD is not None: + probe_cmd = [PARTPROBE_CMD, device] + else: + probe_cmd = [BLKDEV_CMD, '--rereadpt', device] util.udevadm_settle() try: - subp.subp(blkdev_cmd) + subp.subp(probe_cmd) except Exception as e: util.logexc(LOG, "Failed reading the partition table %s" % e) diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index d6212d1d..38e1ad74 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -31,6 +31,7 @@ manuelisimo marlluslustosa matthewruffell mitechie +nicolasbock nishigori olivierlemasle omBratteng -- cgit v1.2.3 From d5cca27a56145a5eb3d2ebad6749989b2fb7dcd3 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Fri, 23 Apr 2021 10:18:05 -0400 Subject: Azure: eject the provisioning iso before reporting ready (#861) Due to hyper-v implementations, iso ejection is more efficient if performed from within the guest. The code will attempt to perform a best-effort ejection. Failure during ejection will not prevent reporting ready from happening. If iso ejection is successful, later iso ejection from the platform will be a no-op. In the event the iso ejection from the guest fails, iso ejection will still happen at the platform level. --- cloudinit/sources/DataSourceAzure.py | 22 ++++++++++++++++++--- cloudinit/sources/helpers/azure.py | 23 +++++++++++++++++++--- .../unittests/test_datasource/test_azure_helper.py | 13 ++++++++++-- 3 files changed, 50 insertions(+), 8 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 8c7fb021..eac6405a 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -332,6 +332,7 @@ class DataSourceAzure(sources.DataSource): dsname = 'Azure' _negotiated = False _metadata_imds = sources.UNSET + _ci_pkl_version = 1 def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -346,8 +347,13 @@ class DataSourceAzure(sources.DataSource): # Regenerate network config new_instance boot and every boot self.update_events['network'].add(EventType.BOOT) self._ephemeral_dhcp_ctx = None - self.failed_desired_api_version = False + self.iso_dev = None + + def _unpickle(self, ci_pkl_version: int) -> None: + super()._unpickle(ci_pkl_version) + if "iso_dev" not in self.__dict__: + self.iso_dev = None def __str__(self): root = sources.DataSource.__str__(self) @@ -459,6 +465,13 @@ class DataSourceAzure(sources.DataSource): '%s was not mountable' % cdev, logger_func=LOG.warning) continue + report_diagnostic_event("Found provisioning metadata in %s" % cdev, + logger_func=LOG.debug) + + # save the iso device for ejection before reporting ready + if cdev.startswith("/dev"): + self.iso_dev = cdev + perform_reprovision = reprovision or self._should_reprovision(ret) perform_reprovision_after_nic_attach = ( reprovision_after_nic_attach or @@ -1226,7 +1239,9 @@ class DataSourceAzure(sources.DataSource): @return: The success status of sending the ready signal. """ try: - get_metadata_from_fabric(None, lease['unknown-245']) + get_metadata_from_fabric(fallback_lease_file=None, + dhcp_opts=lease['unknown-245'], + iso_dev=self.iso_dev) return True except Exception as e: report_diagnostic_event( @@ -1332,7 +1347,8 @@ class DataSourceAzure(sources.DataSource): metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. 
dhclient_lease_file, - pubkey_info=pubkey_info) + pubkey_info=pubkey_info, + iso_dev=self.iso_dev) LOG.debug("negotiating with fabric via agent command %s", self.ds_cfg['agent_command']) diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 03e7156b..ad476076 100755 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -865,7 +865,19 @@ class WALinuxAgentShim: return endpoint_ip_address @azure_ds_telemetry_reporter - def register_with_azure_and_fetch_data(self, pubkey_info=None) -> dict: + def eject_iso(self, iso_dev) -> None: + try: + LOG.debug("Ejecting the provisioning iso") + subp.subp(['eject', iso_dev]) + except Exception as e: + report_diagnostic_event( + "Failed ejecting the provisioning iso: %s" % e, + logger_func=LOG.debug) + + @azure_ds_telemetry_reporter + def register_with_azure_and_fetch_data(self, + pubkey_info=None, + iso_dev=None) -> dict: """Gets the VM's GoalState from Azure, uses the GoalState information to report ready/send the ready signal/provisioning complete signal to Azure, and then uses pubkey_info to filter and obtain the user's @@ -891,6 +903,10 @@ class WALinuxAgentShim: ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info) health_reporter = GoalStateHealthReporter( goal_state, self.azure_endpoint_client, self.endpoint) + + if iso_dev is not None: + self.eject_iso(iso_dev) + health_reporter.send_ready_signal() return {'public-keys': ssh_keys} @@ -1046,11 +1062,12 @@ class WALinuxAgentShim: @azure_ds_telemetry_reporter def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None, - pubkey_info=None): + pubkey_info=None, iso_dev=None): shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts) try: - return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info) + return shim.register_with_azure_and_fetch_data( + pubkey_info=pubkey_info, iso_dev=iso_dev) finally: shim.clean_up() diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 63482c6c..552c7905 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1009,6 +1009,14 @@ class TestWALinuxAgentShim(CiTestCase): self.GoalState.return_value.container_id = self.test_container_id self.GoalState.return_value.instance_id = self.test_instance_id + def test_eject_iso_is_called(self): + shim = wa_shim() + with mock.patch.object( + shim, 'eject_iso', autospec=True + ) as m_eject_iso: + shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0") + m_eject_iso.assert_called_once_with("/dev/sr0") + def test_http_client_does_not_use_certificate_for_report_ready(self): shim = wa_shim() shim.register_with_azure_and_fetch_data() @@ -1283,13 +1291,14 @@ class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase): def test_calls_shim_register_with_azure_and_fetch_data(self): m_pubkey_info = mock.MagicMock() - azure_helper.get_metadata_from_fabric(pubkey_info=m_pubkey_info) + azure_helper.get_metadata_from_fabric( + pubkey_info=m_pubkey_info, iso_dev="/dev/sr0") self.assertEqual( 1, self.m_shim.return_value .register_with_azure_and_fetch_data.call_count) self.assertEqual( - mock.call(pubkey_info=m_pubkey_info), + mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info), self.m_shim.return_value .register_with_azure_and_fetch_data.call_args) -- cgit v1.2.3 From 431b3683fce309e7d673f97c9cf6cc726463b9e0 Mon Sep 17 00:00:00 2001 From: Thomas Stringer Date: Mon, 
26 Apr 2021 09:41:38 -0400 Subject: Azure: Retrieve username and hostname from IMDS (#865) This change allows us to retrieve the username and hostname from IMDS instead of having to rely on the mounted OVF. --- cloudinit/sources/DataSourceAzure.py | 149 +++++++++++++++++++++----- tests/unittests/test_datasource/test_azure.py | 87 ++++++++++++++- 2 files changed, 205 insertions(+), 31 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index eac6405a..f4fc91cd 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -5,6 +5,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import base64 +from collections import namedtuple import contextlib import crypt from functools import partial @@ -25,6 +26,7 @@ from cloudinit.net import device_driver from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers import netlink +from cloudinit import ssh_util from cloudinit import subp from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util @@ -80,7 +82,12 @@ AGENT_SEED_DIR = '/var/lib/waagent' IMDS_TIMEOUT_IN_SECONDS = 2 IMDS_URL = "http://169.254.169.254/metadata" IMDS_VER_MIN = "2019-06-01" -IMDS_VER_WANT = "2020-09-01" +IMDS_VER_WANT = "2020-10-01" + + +# This holds SSH key data including if the source was +# from IMDS, as well as the SSH key data itself. +SSHKeys = namedtuple("SSHKeys", ("keys_from_imds", "ssh_keys")) class metadata_type(Enum): @@ -391,6 +398,8 @@ class DataSourceAzure(sources.DataSource): """Return the subplatform metadata source details.""" if self.seed.startswith('/dev'): subplatform_type = 'config-disk' + elif self.seed.lower() == 'imds': + subplatform_type = 'imds' else: subplatform_type = 'seed-dir' return '%s (%s)' % (subplatform_type, self.seed) @@ -433,9 +442,11 @@ class DataSourceAzure(sources.DataSource): found = None reprovision = False + ovf_is_accessible = True reprovision_after_nic_attach = False for cdev in candidates: try: + LOG.debug("cdev: %s", cdev) if cdev == "IMDS": ret = None reprovision = True @@ -462,8 +473,18 @@ class DataSourceAzure(sources.DataSource): raise sources.InvalidMetaDataException(msg) except util.MountFailedError: report_diagnostic_event( - '%s was not mountable' % cdev, logger_func=LOG.warning) - continue + '%s was not mountable' % cdev, logger_func=LOG.debug) + cdev = 'IMDS' + ovf_is_accessible = False + empty_md = {'local-hostname': ''} + empty_cfg = dict( + system_info=dict( + default_user=dict( + name='' + ) + ) + ) + ret = (empty_md, '', empty_cfg, {}) report_diagnostic_event("Found provisioning metadata in %s" % cdev, logger_func=LOG.debug) @@ -490,6 +511,10 @@ class DataSourceAzure(sources.DataSource): self.fallback_interface, retries=10 ) + if not imds_md and not ovf_is_accessible: + msg = 'No OVF or IMDS available' + report_diagnostic_event(msg) + raise sources.InvalidMetaDataException(msg) (md, userdata_raw, cfg, files) = ret self.seed = cdev crawled_data.update({ @@ -498,6 +523,21 @@ class DataSourceAzure(sources.DataSource): 'metadata': util.mergemanydict( [md, {'imds': imds_md}]), 'userdata_raw': userdata_raw}) + imds_username = _username_from_imds(imds_md) + imds_hostname = _hostname_from_imds(imds_md) + imds_disable_password = _disable_password_from_imds(imds_md) + if imds_username: + LOG.debug('Username retrieved from IMDS: %s', imds_username) + cfg['system_info']['default_user']['name'] = 
imds_username + if imds_hostname: + LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname) + crawled_data['metadata']['local-hostname'] = imds_hostname + if imds_disable_password: + LOG.debug( + 'Disable password retrieved from IMDS: %s', + imds_disable_password + ) + crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501 found = cdev report_diagnostic_event( @@ -676,6 +716,13 @@ class DataSourceAzure(sources.DataSource): @azure_ds_telemetry_reporter def get_public_ssh_keys(self): + """ + Retrieve public SSH keys. + """ + + return self._get_public_ssh_keys_and_source().ssh_keys + + def _get_public_ssh_keys_and_source(self): """ Try to get the ssh keys from IMDS first, and if that fails (i.e. IMDS is unavailable) then fallback to getting the ssh @@ -685,30 +732,50 @@ class DataSourceAzure(sources.DataSource): advantage, so this is a strong preference. But we must keep OVF as a second option for environments that don't have IMDS. """ + LOG.debug('Retrieving public SSH keys') ssh_keys = [] + keys_from_imds = True + LOG.debug('Attempting to get SSH keys from IMDS') try: - raise KeyError( - "Not using public SSH keys from IMDS" - ) - # pylint:disable=unreachable ssh_keys = [ public_key['keyData'] for public_key in self.metadata['imds']['compute']['publicKeys'] ] - LOG.debug('Retrieved SSH keys from IMDS') + for key in ssh_keys: + if not _key_is_openssh_formatted(key=key): + keys_from_imds = False + break + + if not keys_from_imds: + log_msg = 'Keys not in OpenSSH format, using OVF' + else: + log_msg = 'Retrieved {} keys from IMDS'.format( + len(ssh_keys) + if ssh_keys is not None + else 0 + ) except KeyError: log_msg = 'Unable to get keys from IMDS, falling back to OVF' + keys_from_imds = False + finally: report_diagnostic_event(log_msg, logger_func=LOG.debug) + + if not keys_from_imds: + LOG.debug('Attempting to get SSH keys from OVF') try: ssh_keys = self.metadata['public-keys'] - LOG.debug('Retrieved keys from OVF') + log_msg = 'Retrieved {} keys from OVF'.format(len(ssh_keys)) except KeyError: log_msg = 'No keys available from OVF' + finally: report_diagnostic_event(log_msg, logger_func=LOG.debug) - return ssh_keys + return SSHKeys( + keys_from_imds=keys_from_imds, + ssh_keys=ssh_keys + ) def get_config_obj(self): return self.cfg @@ -1325,30 +1392,21 @@ class DataSourceAzure(sources.DataSource): self.bounce_network_with_azure_hostname() pubkey_info = None - try: - raise KeyError( - "Not using public SSH keys from IMDS" - ) - # pylint:disable=unreachable - public_keys = self.metadata['imds']['compute']['publicKeys'] - LOG.debug( - 'Successfully retrieved %s key(s) from IMDS', - len(public_keys) - if public_keys is not None + ssh_keys_and_source = self._get_public_ssh_keys_and_source() + + if not ssh_keys_and_source.keys_from_imds: + pubkey_info = self.cfg.get('_pubkeys', None) + log_msg = 'Retrieved {} fingerprints from OVF'.format( + len(pubkey_info) + if pubkey_info is not None else 0 ) - except KeyError: - LOG.debug( - 'Unable to retrieve SSH keys from IMDS during ' - 'negotiation, falling back to OVF' - ) - pubkey_info = self.cfg.get('_pubkeys', None) + report_diagnostic_event(log_msg, logger_func=LOG.debug) metadata_func = partial(get_metadata_from_fabric, fallback_lease_file=self. 
dhclient_lease_file, - pubkey_info=pubkey_info, - iso_dev=self.iso_dev) + pubkey_info=pubkey_info) LOG.debug("negotiating with fabric via agent command %s", self.ds_cfg['agent_command']) @@ -1404,6 +1462,41 @@ class DataSourceAzure(sources.DataSource): return self.metadata.get('imds', {}).get('compute', {}).get('location') +def _username_from_imds(imds_data): + try: + return imds_data['compute']['osProfile']['adminUsername'] + except KeyError: + return None + + +def _hostname_from_imds(imds_data): + try: + return imds_data['compute']['osProfile']['computerName'] + except KeyError: + return None + + +def _disable_password_from_imds(imds_data): + try: + return imds_data['compute']['osProfile']['disablePasswordAuthentication'] == 'true' # noqa: E501 + except KeyError: + return None + + +def _key_is_openssh_formatted(key): + """ + Validate whether or not the key is OpenSSH-formatted. + """ + + parser = ssh_util.AuthKeyLineParser() + try: + akl = parser.parse(key) + except TypeError: + return False + + return akl.keytype is not None + + def _partitions_on_device(devpath, maxnum=16): # return a list of tuples (ptnum, path) for each part on devpath for suff in ("-part", "p", ""): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 320fa857..d9817d84 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -108,7 +108,7 @@ NETWORK_METADATA = { "zone": "", "publicKeys": [ { - "keyData": "key1", + "keyData": "ssh-rsa key1", "path": "path1" } ] @@ -1761,8 +1761,29 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() dsrc.setup(True) ssh_keys = dsrc.get_public_ssh_keys() - # Temporarily alter this test so that SSH public keys - # from IMDS are *not* going to be in use to fix a regression. 
+ self.assertEqual(ssh_keys, ["ssh-rsa key1"]) + self.assertEqual(m_parse_certificates.call_count, 0) + + @mock.patch( + 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_get_public_ssh_keys_with_no_openssh_format( + self, + m_get_metadata_from_imds, + m_parse_certificates): + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format' + m_get_metadata_from_imds.return_value = imds_data + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + dsrc = self._get_ds(data) + dsrc.get_data() + dsrc.setup(True) + ssh_keys = dsrc.get_public_ssh_keys() self.assertEqual(ssh_keys, []) self.assertEqual(m_parse_certificates.call_count, 0) @@ -1818,6 +1839,66 @@ scbus-1 on xpt0 bus 0 self.assertIsNotNone(dsrc.metadata) self.assertFalse(dsrc.failed_desired_api_version) + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_hostname_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) + imds_data_with_os_profile["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true" + ) + m_get_metadata_from_imds.return_value = imds_data_with_os_profile + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual(dsrc.metadata["local-hostname"], "hostname1") + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_username_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) + imds_data_with_os_profile["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true" + ) + m_get_metadata_from_imds.return_value = imds_data_with_os_profile + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertEqual( + dsrc.cfg["system_info"]["default_user"]["name"], + "username1" + ) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_disable_password_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA) + imds_data_with_os_profile["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true" + ) + m_get_metadata_from_imds.return_value = imds_data_with_os_profile + dsrc = self._get_ds(data) + dsrc.get_data() + self.assertTrue(dsrc.metadata["disable_password"]) + class TestAzureBounce(CiTestCase): -- cgit v1.2.3 From b2311664fa73ec85c3f85ec6ff6b092765e07975 Mon Sep 17 00:00:00 2001 From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com> Date: Mon, 26 Apr 2021 07:28:39 -0700 Subject: Azure: Retry net metadata during nic 
attach for non-timeout errs (#878) When network interfaces are hot-attached to the VM, attempting to get network metadata might return 410 (or 500, 503 etc) because the info is not yet available. In those cases, we retry getting the metadata before giving up. The only case where we can move on to wait for more nic attach events is if the call times out despite retries, which means the interface is not likely a primary interface, and we should try for more nic attach events. --- cloudinit/sources/DataSourceAzure.py | 65 +++++++++++++++--- tests/unittests/test_datasource/test_azure.py | 95 +++++++++++++++++++++++---- 2 files changed, 140 insertions(+), 20 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index f4fc91cd..38790c12 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -17,6 +17,7 @@ from time import sleep from xml.dom import minidom import xml.etree.ElementTree as ET from enum import Enum +import requests from cloudinit import dmi from cloudinit import log as logging @@ -665,7 +666,9 @@ class DataSourceAzure(sources.DataSource): self, fallback_nic, retries, - md_type=metadata_type.compute): + md_type=metadata_type.compute, + exc_cb=retry_on_url_exc, + infinite=False): """ Wrapper for get_metadata_from_imds so that we can have flexibility in which IMDS api-version we use. If a particular instance of IMDS @@ -685,7 +688,8 @@ class DataSourceAzure(sources.DataSource): fallback_nic=fallback_nic, retries=0, md_type=md_type, - api_version=IMDS_VER_WANT + api_version=IMDS_VER_WANT, + exc_cb=exc_cb ) except UrlError as err: LOG.info( @@ -708,7 +712,9 @@ class DataSourceAzure(sources.DataSource): fallback_nic=fallback_nic, retries=retries, md_type=md_type, - api_version=IMDS_VER_MIN + api_version=IMDS_VER_MIN, + exc_cb=exc_cb, + infinite=infinite ) def device_name_to_device(self, name): @@ -938,6 +944,9 @@ class DataSourceAzure(sources.DataSource): is_primary = False expected_nic_count = -1 imds_md = None + metadata_poll_count = 0 + metadata_logging_threshold = 1 + metadata_timeout_count = 0 # For now, only a VM's primary NIC can contact IMDS and WireServer. If # DHCP fails for a NIC, we have no mechanism to determine if the NIC is @@ -962,14 +971,48 @@ class DataSourceAzure(sources.DataSource): % (ifname, e), logger_func=LOG.error) raise + # Retry polling network metadata for a limited duration only when the + # calls fail due to timeout. This is because the platform drops packets + # going towards IMDS when it is not a primary nic. If the calls fail + # due to other issues like 410, 503 etc, then it means we are primary + # but IMDS service is unavailable at the moment. Retry indefinitely in + # those cases since we cannot move on without the network metadata. + def network_metadata_exc_cb(msg, exc): + nonlocal metadata_timeout_count, metadata_poll_count + nonlocal metadata_logging_threshold + + metadata_poll_count = metadata_poll_count + 1 + + # Log when needed but back off exponentially to avoid exploding + # the log file. + if metadata_poll_count >= metadata_logging_threshold: + metadata_logging_threshold *= 2 + report_diagnostic_event( + "Ran into exception when attempting to reach %s " + "after %d polls." % (msg, metadata_poll_count), + logger_func=LOG.error) + + if isinstance(exc, UrlError): + report_diagnostic_event("poll IMDS with %s failed. 
" + "Exception: %s and code: %s" % + (msg, exc.cause, exc.code), + logger_func=LOG.error) + + if exc.cause and isinstance(exc.cause, requests.Timeout): + metadata_timeout_count = metadata_timeout_count + 1 + return (metadata_timeout_count <= 10) + return True + # Primary nic detection will be optimized in the future. The fact that # primary nic is being attached first helps here. Otherwise each nic # could add several seconds of delay. try: imds_md = self.get_imds_data_with_api_fallback( ifname, - 5, - metadata_type.network + 0, + metadata_type.network, + network_metadata_exc_cb, + True ) except Exception as e: LOG.warning( @@ -2139,7 +2182,9 @@ def _generate_network_config_from_fallback_config() -> dict: def get_metadata_from_imds(fallback_nic, retries, md_type=metadata_type.compute, - api_version=IMDS_VER_MIN): + api_version=IMDS_VER_MIN, + exc_cb=retry_on_url_exc, + infinite=False): """Query Azure's instance metadata service, returning a dictionary. If network is not up, setup ephemeral dhcp on fallback_nic to talk to the @@ -2158,7 +2203,7 @@ def get_metadata_from_imds(fallback_nic, kwargs = {'logfunc': LOG.debug, 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)', 'func': _get_metadata_from_imds, - 'args': (retries, md_type, api_version,)} + 'args': (retries, exc_cb, md_type, api_version, infinite)} if net.is_up(fallback_nic): return util.log_time(**kwargs) else: @@ -2176,14 +2221,16 @@ def get_metadata_from_imds(fallback_nic, @azure_ds_telemetry_reporter def _get_metadata_from_imds( retries, + exc_cb, md_type=metadata_type.compute, - api_version=IMDS_VER_MIN): + api_version=IMDS_VER_MIN, + infinite=False): url = "{}?api-version={}".format(md_type.value, api_version) headers = {"Metadata": "true"} try: response = readurl( url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers, - retries=retries, exception_cb=retry_on_url_exc) + retries=retries, exception_cb=exc_cb, infinite=infinite) except Exception as e: # pylint:disable=no-member if isinstance(e, UrlError) and e.code == 400: diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index d9817d84..c4a8e08d 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -448,7 +448,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): "http://169.254.169.254/metadata/instance?api-version=" "2019-06-01", exception_cb=mock.ANY, headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY) + timeout=mock.ANY, infinite=False) @mock.patch(MOCKPATH + 'readurl', autospec=True) @mock.patch(MOCKPATH + 'EphemeralDHCPv4') @@ -467,7 +467,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): "http://169.254.169.254/metadata/instance/network?api-version=" "2019-06-01", exception_cb=mock.ANY, headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY) + timeout=mock.ANY, infinite=False) @mock.patch(MOCKPATH + 'readurl', autospec=True) @mock.patch(MOCKPATH + 'EphemeralDHCPv4') @@ -486,7 +486,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): "http://169.254.169.254/metadata/instance?api-version=" "2019-06-01", exception_cb=mock.ANY, headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY) + timeout=mock.ANY, infinite=False) @mock.patch(MOCKPATH + 'readurl', autospec=True) @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True) @@ -511,7 +511,7 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): m_readurl.assert_called_with( self.network_md_url, exception_cb=mock.ANY, headers={'Metadata': 'true'}, retries=2, - 
timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS) + timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False) @mock.patch('cloudinit.url_helper.time.sleep') @mock.patch(MOCKPATH + 'net.is_up', autospec=True) @@ -2694,15 +2694,22 @@ class TestPreprovisioningHotAttachNics(CiTestCase): def nic_attach_ret(nl_sock, nics_found): nonlocal m_attach_call_count - if m_attach_call_count == 0: - m_attach_call_count = m_attach_call_count + 1 + m_attach_call_count = m_attach_call_count + 1 + if m_attach_call_count == 1: return "eth0" - return "eth1" + elif m_attach_call_count == 2: + return "eth1" + raise RuntimeError("Must have found primary nic by now.") + + # Simulate two NICs by adding the same one twice. + md = { + "interface": [ + IMDS_NETWORK_METADATA['interface'][0], + IMDS_NETWORK_METADATA['interface'][0] + ] + } - def network_metadata_ret(ifname, retries, type): - # Simulate two NICs by adding the same one twice. - md = IMDS_NETWORK_METADATA - md['interface'].append(md['interface'][0]) + def network_metadata_ret(ifname, retries, type, exc_cb, infinite): if ifname == "eth0": return md raise requests.Timeout('Fake connection timeout') @@ -2724,6 +2731,72 @@ class TestPreprovisioningHotAttachNics(CiTestCase): self.assertEqual(1, m_imds.call_count) self.assertEqual(2, m_link_up.call_count) + @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + def test_check_if_nic_is_primary_retries_on_failures( + self, m_dhcpv4, m_imds): + """Retry polling for network metadata on all failures except timeout""" + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + + eth0Retries = [] + eth1Retries = [] + # Simulate two NICs by adding the same one twice. + md = { + "interface": [ + IMDS_NETWORK_METADATA['interface'][0], + IMDS_NETWORK_METADATA['interface'][0] + ] + } + + def network_metadata_ret(ifname, retries, type, exc_cb, infinite): + nonlocal eth0Retries, eth1Retries + + # Simulate readurl functionality with retries and + # exception callbacks so that the callback logic can be + # validated. + if ifname == "eth0": + cause = requests.HTTPError() + for _ in range(0, 15): + error = url_helper.UrlError(cause=cause, code=410) + eth0Retries.append(exc_cb("No goal state.", error)) + else: + cause = requests.Timeout('Fake connection timeout') + for _ in range(0, 10): + error = url_helper.UrlError(cause=cause) + eth1Retries.append(exc_cb("Connection timeout", error)) + # Should stop retrying after 10 retries + eth1Retries.append(exc_cb("Connection timeout", error)) + raise cause + return md + + m_imds.side_effect = network_metadata_ret + + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + + is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0") + self.assertEqual(True, is_primary) + self.assertEqual(2, expected_nic_count) + + # All Eth0 errors are non-timeout errors. So we should have been + # retrying indefinitely until success. + for i in eth0Retries: + self.assertTrue(i) + + is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1") + self.assertEqual(False, is_primary) + + # All Eth1 errors are timeout errors. Retry happens for a max of 10 and + # then we should have moved on assuming it is not the primary nic. 
+ for i in range(0, 10): + self.assertTrue(eth1Retries[i]) + self.assertFalse(eth1Retries[10]) + @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up') def test_wait_for_link_up_returns_if_already_up( self, m_is_link_up): -- cgit v1.2.3 From 02db2c3ecf29924690d4c4adf6ec059f36f31103 Mon Sep 17 00:00:00 2001 From: Ben Hughes Date: Mon, 26 Apr 2021 21:19:26 +0100 Subject: Fix chef module run failure when chef_license is set (#868) Move chef_license from TPL_PATH_KEYS to TPL_KEYS as the chef license setting is not a path but must be added to the client config template. Fixes file or folder not found exception raised from ensure_dirs. --- cloudinit/config/cc_chef.py | 2 +- templates/chef_client.rb.tmpl | 2 +- tools/.github-cla-signers | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index aaf71366..7b20222e 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -70,7 +70,6 @@ CHEF_RB_TPL_PATH_KEYS = frozenset([ 'json_attribs', 'pid_file', 'encrypted_data_bag_secret', - 'chef_license', ]) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) @@ -80,6 +79,7 @@ CHEF_RB_TPL_KEYS.extend([ 'node_name', 'environment', 'validation_name', + 'chef_license', ]) CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS) CHEF_RB_PATH = '/etc/chef/client.rb' diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl index 0a759b04..b9d58172 100644 --- a/templates/chef_client.rb.tmpl +++ b/templates/chef_client.rb.tmpl @@ -15,7 +15,7 @@ The reason these are not in quotes is because they are ruby symbols that will be placed inside here, and not actual strings... #} {% if chef_license %} -chef_license "{{chef_license}}" +chef_license "{{chef_license}}" {% endif%} {% if log_level %} log_level {{log_level}} diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 38e1ad74..d7151a59 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -51,3 +51,4 @@ WebSpider xiachen-rh xnox hamalq +bmhughes -- cgit v1.2.3 From ba82b3ef70d0f8de7471aea8fcc89923d9b07235 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Tue, 27 Apr 2021 13:40:59 -0400 Subject: Azure: adding support for consuming userdata from IMDS (#884) --- cloudinit/sources/DataSourceAzure.py | 23 +++++++++++- tests/unittests/test_datasource/test_azure.py | 50 +++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 38790c12..c0025c7b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -83,7 +83,7 @@ AGENT_SEED_DIR = '/var/lib/waagent' IMDS_TIMEOUT_IN_SECONDS = 2 IMDS_URL = "http://169.254.169.254/metadata" IMDS_VER_MIN = "2019-06-01" -IMDS_VER_WANT = "2020-10-01" +IMDS_VER_WANT = "2021-01-01" # This holds SSH key data including if the source was @@ -539,6 +539,20 @@ class DataSourceAzure(sources.DataSource): imds_disable_password ) crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501 + + # only use userdata from imds if OVF did not provide custom data + # userdata provided by IMDS is always base64 encoded + if not userdata_raw: + imds_userdata = _userdata_from_imds(imds_md) + if imds_userdata: + LOG.debug("Retrieved userdata from IMDS") + try: + crawled_data['userdata_raw'] = base64.b64decode( + ''.join(imds_userdata.split())) + 
except Exception: + report_diagnostic_event( + "Bad userdata in IMDS", + logger_func=LOG.warning) found = cdev report_diagnostic_event( @@ -1512,6 +1526,13 @@ def _username_from_imds(imds_data): return None +def _userdata_from_imds(imds_data): + try: + return imds_data['compute']['userData'] + except KeyError: + return None + + def _hostname_from_imds(imds_data): try: return imds_data['compute']['osProfile']['computerName'] diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index c4a8e08d..f8433690 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1899,6 +1899,56 @@ scbus-1 on xpt0 bus 0 dsrc.get_data() self.assertTrue(dsrc.metadata["disable_password"]) + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_userdata_from_imds(self, m_get_metadata_from_imds): + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + userdata = "userdataImds" + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true", + ) + imds_data["compute"]["userData"] = b64e(userdata) + m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8')) + + @mock.patch(MOCKPATH + 'get_metadata_from_imds') + def test_userdata_from_imds_with_customdata_from_OVF( + self, m_get_metadata_from_imds): + userdataOVF = "userdataOVF" + odata = { + 'HostName': "myhost", 'UserName': "myuser", + 'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'} + } + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} + data = { + 'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg + } + + userdataImds = "userdataImds" + imds_data = copy.deepcopy(NETWORK_METADATA) + imds_data["compute"]["osProfile"] = dict( + adminUsername="username1", + computerName="hostname1", + disablePasswordAuthentication="true", + ) + imds_data["compute"]["userData"] = b64e(userdataImds) + m_get_metadata_from_imds.return_value = imds_data + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) + class TestAzureBounce(CiTestCase): -- cgit v1.2.3 From 5f5fa5ee99296b3b1044682c41bab38a32cdccd7 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 3 May 2021 10:56:46 -0400 Subject: Revert "Add support to resize rootfs if using LVM (#721)" (#887) This reverts commit 74fa008bfcd3263eb691cc0b3f7a055b17569f8b. During pre-release testing, we discovered two issues with this commit. Firstly, there's a typo in the udevadm command that causes a TypeError for _all_ growpart executions. Secondly, the LVM resizing does not appear to successfully resize everything up to the LV, though some things do get resized. We certainly want this change, so we'll be happy to review and land it alongside an integration test which confirms that it is working as expected. 
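For context, the "typo in the udevadm command" described above is visible in the removed is_lvm_lv hunk below. The sketch that follows illustrates the failure mode, assuming cloudinit.subp.subp's (args, data, rcs, ...) parameter order, which is not itself shown in this patch:

    from cloudinit import subp

    devpath = '/dev/vda1'  # hypothetical device path

    # Broken form from the reverted commit (raises TypeError: the extra
    # positional arguments spill into subp()'s data and rcs parameters,
    # so the exit-code check receives a string instead of a list of ints):
    #   out, _ = subp.subp("udevadm", "info", devpath)

    # Correct form: pass the whole command as a single argv list.
    out, _ = subp.subp(["udevadm", "info", devpath])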
LP: #1922742 --- cloudinit/config/cc_growpart.py | 83 +--------------------- doc/examples/cloud-config-growpart.txt | 2 - .../test_handler/test_handler_growpart.py | 56 +-------------- 3 files changed, 4 insertions(+), 137 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 6399bfb7..9f338ad1 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -68,9 +68,7 @@ import os import os.path import re import stat -import platform -from functools import lru_cache from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import subp @@ -95,58 +93,6 @@ class RESIZE(object): LOG = logging.getLogger(__name__) -@lru_cache() -def is_lvm_lv(devpath): - if util.is_Linux(): - # all lvm lvs will have a realpath as a 'dm-*' name. - rpath = os.path.realpath(devpath) - if not os.path.basename(rpath).startswith("dm-"): - return False - out, _ = subp.subp("udevadm", "info", devpath) - # lvs should have DM_LV_NAME= and also DM_VG_NAME - return 'DM_LV_NAME=' in out - else: - LOG.info("Not an LVM Logical Volume partition") - return False - - -@lru_cache() -def get_pvs_for_lv(devpath): - myenv = {'LANG': 'C'} - - if not util.is_Linux(): - LOG.info("No support for LVM on %s", platform.system()) - return None - if not subp.which('lvm'): - LOG.info("No 'lvm' command present") - return None - - try: - (out, _err) = subp.subp(["lvm", "lvs", devpath, "--options=vgname", - "--noheadings"], update_env=myenv) - vgname = out.strip() - except subp.ProcessExecutionError as e: - if e.exit_code != 0: - util.logexc(LOG, "Failed: can't get Volume Group information " - "from %s", devpath) - raise ResizeFailedException(e) from e - - try: - (out, _err) = subp.subp(["lvm", "vgs", vgname, "--options=pvname", - "--noheadings"], update_env=myenv) - pvs = [p.strip() for p in out.splitlines()] - if len(pvs) > 1: - LOG.info("Do not know how to resize multiple Physical" - " Volumes") - else: - return pvs[0] - except subp.ProcessExecutionError as e: - if e.exit_code != 0: - util.logexc(LOG, "Failed: can't get Physical Volume " - "information from Volume Group %s", vgname) - raise ResizeFailedException(e) from e - - def resizer_factory(mode): resize_class = None if mode == "auto": @@ -262,18 +208,13 @@ def get_size(filename): os.close(fd) -def device_part_info(devpath, is_lvm): +def device_part_info(devpath): # convert an entry in /dev/ to parent disk and partition number # input of /dev/vdb or /dev/disk/by-label/foo # rpath is hopefully a real-ish path in /dev (vda, sdb..) 
rpath = os.path.realpath(devpath) - # first check if this is an LVM and get its PVs - lvm_rpath = get_pvs_for_lv(devpath) - if is_lvm and lvm_rpath: - rpath = lvm_rpath - bname = os.path.basename(rpath) syspath = "/sys/class/block/%s" % bname @@ -303,7 +244,7 @@ def device_part_info(devpath, is_lvm): # diskdevpath has something like 253:0 # and udev has put links in /dev/block/253:0 to the device name in /dev/ - return diskdevpath, ptnum + return (diskdevpath, ptnum) def devent2dev(devent): @@ -353,9 +294,8 @@ def resize_devices(resizer, devices): "device '%s' not a block device" % blockdev,)) continue - is_lvm = is_lvm_lv(blockdev) try: - disk, ptnum = device_part_info(blockdev, is_lvm) + (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: info.append((devent, RESIZE.SKIPPED, "device_part_info(%s) failed: %s" % (blockdev, e),)) @@ -376,23 +316,6 @@ def resize_devices(resizer, devices): "failed to resize: disk=%s, ptnum=%s: %s" % (disk, ptnum, e),)) - if is_lvm and isinstance(resizer, ResizeGrowPart): - try: - if len(devices) == 1: - (_out, _err) = subp.subp( - ["lvm", "lvextend", "--extents=100%FREE", blockdev], - update_env={'LANG': 'C'}) - info.append((devent, RESIZE.CHANGED, - "Logical Volume %s extended" % devices[0],)) - else: - LOG.info("Exactly one device should be configured to be " - "resized when using LVM. More than one configured" - ": %s", devices) - except (subp.ProcessExecutionError, ValueError) as e: - info.append((devent, RESIZE.NOCHANGE, - "Logical Volume %s resize failed: %s" % - (blockdev, e),)) - return info diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt index 09268117..393d5164 100644 --- a/doc/examples/cloud-config-growpart.txt +++ b/doc/examples/cloud-config-growpart.txt @@ -13,8 +13,6 @@ # # devices: # a list of things to resize. -# if the devices are under LVM, the list should be a single entry, -# cloud-init will then extend the single entry, otherwise it will fail. # items can be filesystem paths or devices (in /dev) # examples: # devices: [/, /dev/vdb1] diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index cc0a9248..7f039b79 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -172,53 +172,6 @@ class TestResize(unittest.TestCase): self.name = "growpart" self.log = logging.getLogger("TestResize") - def test_lvm_resize(self): - # LVM resize should work only if a single device is configured. More - # than one device should fail. 
- lvm_pass = ["/dev/XXdm-0"] - lvm_fail = ["/dev/XXdm-1", "/dev/YYdm-1"] - devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5, - st_nlink=1, st_uid=0, st_gid=6, st_size=0, - st_atime=0, st_mtime=0, st_ctime=0) - real_stat = os.stat - resize_calls = [] - - class myresizer(object): - def resize(self, diskdev, partnum, partdev): - resize_calls.append((diskdev, partnum, partdev)) - if partdev == "/dev/XXdm-0": - return (1024, 2048) - return (1024, 1024) # old size, new size - - def mystat(path): - if path in lvm_pass or path in lvm_fail: - return devstat_ret - return real_stat(path) - - try: - opinfo = cc_growpart.device_part_info - cc_growpart.device_part_info = simple_device_part_info_lvm - os.stat = mystat - - resized = cc_growpart.resize_devices(myresizer(), lvm_pass) - not_resized = cc_growpart.resize_devices(myresizer(), lvm_fail) - - def find(name, res): - for f in res: - if f[0] == name: - return f - return None - - self.assertEqual(cc_growpart.RESIZE.CHANGED, - find("/dev/XXdm-0", resized)[1]) - self.assertEqual(cc_growpart.RESIZE.NOCHANGE, - find("/dev/XXdm-1", not_resized)[1]) - self.assertEqual(cc_growpart.RESIZE.NOCHANGE, - find("/dev/YYdm-1", not_resized)[1]) - finally: - cc_growpart.device_part_info = opinfo - os.stat = real_stat - def test_simple_devices(self): # test simple device list # this patches out devent2dev, os.stat, and device_part_info @@ -274,14 +227,7 @@ class TestResize(unittest.TestCase): os.stat = real_stat -def simple_device_part_info_lvm(devpath, is_lvm): - # simple stupid return (/dev/vda, 1) for /dev/vda - ret = re.search("([^0-9]*)([0-9]*)$", devpath) - x = (ret.group(1), ret.group(2)) - return x - - -def simple_device_part_info(devpath, is_lvm): +def simple_device_part_info(devpath): # simple stupid return (/dev/vda, 1) for /dev/vda ret = re.search("([^0-9]*)([0-9]*)$", devpath) x = (ret.group(1), ret.group(2)) -- cgit v1.2.3 From f17f78fa9d28e62793a5f2c7109fc29eeffb0c89 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 5 May 2021 10:54:17 -0500 Subject: Add \r\n check for SSH keys in Azure (#889) See https://bugs.launchpad.net/cloud-init/+bug/1910835 --- cloudinit/sources/DataSourceAzure.py | 3 +++ tests/unittests/test_datasource/test_azure.py | 12 ++++++++++++ 2 files changed, 15 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index c0025c7b..2f3390c3 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1551,6 +1551,9 @@ def _key_is_openssh_formatted(key): """ Validate whether or not the key is OpenSSH-formatted. 
""" + # See https://bugs.launchpad.net/cloud-init/+bug/1910835 + if '\r\n' in key.strip(): + return False parser = ssh_util.AuthKeyLineParser() try: diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index f8433690..742d1faa 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1764,6 +1764,18 @@ scbus-1 on xpt0 bus 0 self.assertEqual(ssh_keys, ["ssh-rsa key1"]) self.assertEqual(m_parse_certificates.call_count, 0) + def test_key_without_crlf_valid(self): + test_key = 'ssh-rsa somerandomkeystuff some comment' + assert True is dsaz._key_is_openssh_formatted(test_key) + + def test_key_with_crlf_invalid(self): + test_key = 'ssh-rsa someran\r\ndomkeystuff some comment' + assert False is dsaz._key_is_openssh_formatted(test_key) + + def test_key_endswith_crlf_valid(self): + test_key = 'ssh-rsa somerandomkeystuff some comment\r\n' + assert True is dsaz._key_is_openssh_formatted(test_key) + @mock.patch( 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') @mock.patch(MOCKPATH + 'get_metadata_from_imds') -- cgit v1.2.3 From 13877549527812959f59e4add685bc42d350edd8 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 5 May 2021 14:50:41 -0500 Subject: Release 21.2 (#890) Bump the version in cloudinit/version.py to 21.2 and update ChangeLog. LP: #1927254 --- ChangeLog | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++ cloudinit/version.py | 2 +- 2 files changed, 64 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index 44b50410..98528249 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,66 @@ +21.2 + - Add \r\n check for SSH keys in Azure (#889) + - Revert "Add support to resize rootfs if using LVM (#721)" (#887) + (LP: #1922742) + - Add Vultaire as contributor (#881) [Paul Goins] + - Azure: adding support for consuming userdata from IMDS (#884) [Anh Vo] + - test_upgrade: modify test_upgrade_package to run for more sources (#883) + - Fix chef module run failure when chef_license is set (#868) [Ben Hughes] + - Azure: Retry net metadata during nic attach for non-timeout errs (#878) + [aswinrajamannar] + - Azure: Retrieve username and hostname from IMDS (#865) [Thomas Stringer] + - Azure: eject the provisioning iso before reporting ready (#861) [Anh Vo] + - Use `partprobe` to re-read partition table if available (#856) + [Nicolas Bock] (LP: #1920939) + - fix error on upgrade caused by new vendordata2 attributes (#869) + (LP: #1922739) + - add prefer_fqdn_over_hostname config option (#859) + [hamalq] (LP: #1921004) + - Emit dots on travis to avoid timeout (#867) + - doc: Replace remaining references to user-scripts as a config module + (#866) [Ryan Harper] + - azure: Removing ability to invoke walinuxagent (#799) [Anh Vo] + - Add Vultr support (#827) [David Dymko] + - Fix unpickle for source paths missing run_dir (#863) + [lucasmoura] (LP: #1899299) + - sysconfig: use BONDING_MODULE_OPTS on SUSE (#831) [Jens Sandmann] + - bringup_static_routes: fix gateway check (#850) [Petr Fedchenkov] + - add hamalq user (#860) [hamalq] + - Add support to resize rootfs if using LVM (#721) + [Eduardo Otubo] (LP: #1799953) + - Fix mis-detecting network configuration in initramfs cmdline (#844) + (LP: #1919188) + - tools/write-ssh-key-fingerprints: do not display empty header/footer + (#817) [dermotbradley] + - Azure helper: Ensure Azure http handler sleeps between retries (#842) + [Johnson Shi] + - Fix chef apt source example (#826) 
[timothegenzmer] + - .travis.yml: generate an SSH key before running tests (#848) + - write passwords only to serial console, lock down cloud-init-output.log + (#847) (LP: #1918303) + - Fix apt default integration test (#845) + - integration_tests: bump pycloudlib dependency (#846) + - Fix stack trace if vendordata_raw contained an array (#837) [eb3095] + - archlinux: Fix broken locale logic (#841) + [Kristian Klausen] (LP: #1402406) + - Integration test for #783 (#832) + - integration_tests: mount more paths IN_PLACE (#838) + - Fix requiring device-number on EC2 derivatives (#836) (LP: #1917875) + - Remove the vi comment from the part-handler example (#835) + - net: exclude OVS internal interfaces in get_interfaces (#829) + (LP: #1912844) + - tox.ini: pass OS_* environment variables to integration tests (#830) + - integration_tests: add OpenStack as a platform (#804) + - Add flexibility to IMDS api-version (#793) [Thomas Stringer] + - Fix the TestApt tests using apt-key on Xenial and Hirsute (#823) + [Paride Legovini] (LP: #1916629) + - doc: remove duplicate "it" from nocloud.rst (#825) [V.I. Wood] + - archlinux: Use hostnamectl to set the transient hostname (#797) + [Kristian Klausen] + - cc_keys_to_console.py: Add documentation for recently added config key + (#824) [dermotbradley] + - Update cc_set_hostname documentation (#818) [Toshi Aoyama] + 21.1 - Azure: Support for VMs without ephemeral resource disks. (#800) [Johnson Shi] (LP: #1901011) diff --git a/cloudinit/version.py b/cloudinit/version.py index 94afd60d..be47aff3 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "21.1" +__VERSION__ = "21.2" _PACKAGED_VERSION = '@@PACKAGED_VERSION@@' FEATURES = [ -- cgit v1.2.3 From 8cf40a73646a9448746fc4043c6410ae92172e7c Mon Sep 17 00:00:00 2001 From: Andrew Lukoshko Date: Sat, 8 May 2021 00:31:14 +0300 Subject: Add AlmaLinux OS support (#872) AlmaLinux OS is RHEL-compatible so all the changes needed are trivial. --- README.md | 2 +- cloudinit/config/cc_ntp.py | 4 ++-- cloudinit/config/cc_yum_add_repo.py | 4 ++-- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/almalinux.py | 9 +++++++++ cloudinit/net/sysconfig.py | 2 +- cloudinit/tests/test_util.py | 35 +++++++++++++++++++++++++++++++++++ cloudinit/util.py | 4 ++-- config/cloud.cfg.tmpl | 6 +++--- systemd/cloud-init-generator.tmpl | 2 +- systemd/cloud-init.service.tmpl | 2 +- tests/unittests/test_cli.py | 2 +- tools/.github-cla-signers | 1 + tools/render-cloudcfg | 4 ++-- 14 files changed, 62 insertions(+), 17 deletions(-) create mode 100644 cloudinit/distros/almalinux.py (limited to 'cloudinit') diff --git a/README.md b/README.md index aa6d84ae..01fd3b07 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)














|
+| Alpine Linux
ArchLinux
Debian
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)














| ## To start developing cloud-init diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index e183993f..41c278ff 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -24,8 +24,8 @@ LOG = logging.getLogger(__name__) frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 -distros = ['alpine', 'centos', 'debian', 'fedora', 'opensuse', 'rhel', - 'sles', 'ubuntu'] +distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse', + 'rhel', 'sles', 'ubuntu'] NTP_CLIENT_CONFIG = { 'chrony': { diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 01fe683c..db513ed7 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -18,7 +18,7 @@ entry, the config entry will be skipped. **Module frequency:** per always -**Supported distros:** centos, fedora, rhel +**Supported distros:** almalinux, centos, fedora, rhel **Config keys**:: @@ -36,7 +36,7 @@ from configparser import ConfigParser from cloudinit import util -distros = ['centos', 'fedora', 'rhel'] +distros = ['almalinux', 'centos', 'fedora', 'rhel'] def _canonicalize_id(repo_id): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 8b8a647d..107b928c 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -46,7 +46,7 @@ OSFAMILIES = { 'debian': ['debian', 'ubuntu'], 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], - 'redhat': ['amazon', 'centos', 'fedora', 'rhel'], + 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'rhel'], 'suse': ['opensuse', 'sles'], } diff --git a/cloudinit/distros/almalinux.py b/cloudinit/distros/almalinux.py new file mode 100644 index 00000000..edb3165d --- /dev/null +++ b/cloudinit/distros/almalinux.py @@ -0,0 +1,9 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.distros import rhel + + +class Distro(rhel.Distro): + pass + +# vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index e4607804..089b44b2 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -19,7 +19,7 @@ from .network_state import ( LOG = logging.getLogger(__name__) NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" -KNOWN_DISTROS = ['centos', 'fedora', 'rhel', 'suse'] +KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'suse'] def _make_header(sep='#'): diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index e811917e..a4c02877 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -107,12 +107,31 @@ OS_RELEASE_REDHAT_7 = dedent("""\ REDHAT_SUPPORT_PRODUCT_VERSION="7.5" """) +OS_RELEASE_ALMALINUX_8 = dedent("""\ + NAME="AlmaLinux" + VERSION="8.3 (Purple Manul)" + ID="almalinux" + ID_LIKE="rhel centos fedora" + VERSION_ID="8.3" + PLATFORM_ID="platform:el8" + PRETTY_NAME="AlmaLinux 8.3 (Purple Manul)" + ANSI_COLOR="0;34" + CPE_NAME="cpe:/o:almalinux:almalinux:8.3:GA" + HOME_URL="https://almalinux.org/" + BUG_REPORT_URL="https://bugs.almalinux.org/" + + ALMALINUX_MANTISBT_PROJECT="AlmaLinux-8" + ALMALINUX_MANTISBT_PROJECT_VERSION="8.3" +""") + REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)" REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)" REDHAT_RELEASE_REDHAT_6 = ( "Red Hat Enterprise Linux Server release 6.10 (Santiago)") REDHAT_RELEASE_REDHAT_7 = ( "Red Hat Enterprise Linux Server release 7.5 (Maipo)") +REDHAT_RELEASE_ALMALINUX_8 = ( + "AlmaLinux release 8.3 (Purple Manul)") OS_RELEASE_DEBIAN = dedent("""\ @@ -502,6 +521,22 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('centos', '7', 'Core'), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): + """Verify almalinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists): + """Verify almalinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_ALMALINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_debian(self, m_os_release, m_path_exists): """Verify we get the correct name and release name on Debian.""" diff --git a/cloudinit/util.py b/cloudinit/util.py index 4e0a72db..fdea1181 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -521,8 +521,8 @@ def system_info(): if system == "linux": linux_dist = info['dist'][0].lower() if linux_dist in ( - 'alpine', 'arch', 'centos', 'debian', 'fedora', 'rhel', - 'suse'): + 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora', + 'rhel', 'suse'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 7171aaa5..8656daa7 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -21,7 +21,7 @@ disable_root: false disable_root: true {% endif %} -{% if variant in ["alpine", "amazon", "centos", "fedora", "rhel"] 
%} +{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora", "rhel"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} resize_rootfs: noblock @@ -153,7 +153,7 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["alpine", "amazon", "arch", "centos", "debian", +{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian", "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu"] %} distro: {{ variant }} @@ -206,7 +206,7 @@ system_info: primary: http://ports.ubuntu.com/ubuntu-ports security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh -{% elif variant in ["alpine", "amazon", "arch", "centos", "fedora", +{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "fedora", "rhel", "suse"] %} # Default user name + that default users groups (if added/used) default_user: diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 0773356b..9b103ef9 100755 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -83,7 +83,7 @@ default() { check_for_datasource() { local ds_rc="" -{% if variant in ["rhel", "fedora", "centos"] %} +{% if variant in ["almalinux", "rhel", "fedora", "centos"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} local dsidentify="/usr/lib/cloud-init/ds-identify" diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index f140344d..a5c51277 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -10,7 +10,7 @@ After=systemd-networkd-wait-online.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=networking.service {% endif %} -{% if variant in ["centos", "fedora", "rhel"] %} +{% if variant in ["almalinux", "centos", "fedora", "rhel"] %} After=network.service After=NetworkManager.service {% endif %} diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 74f85959..f5cf514d 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -224,7 +224,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all']) expected_doc_sections = [ '**Supported distros:** all', - '**Supported distros:** alpine, centos, debian, fedora', + '**Supported distros:** almalinux, alpine, centos, debian, fedora', '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' ] diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 607f1d8e..48995057 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -3,6 +3,7 @@ ajmyyra AlexBaranowski Aman306 andrewbogott +andrewlukoshko antonyc aswinrajamannar beezly diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index ed454840..f5990748 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -4,8 +4,8 @@ import argparse import os import sys -VARIANTS = ["alpine", "amazon", "arch", "centos", "debian", "fedora", - "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu", +VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian", + "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"] -- cgit v1.2.3 From 77320bfcdafa4ba63bb59be63d11c4d7414e5f92 Mon Sep 17 00:00:00 2001 From: dermotbradley Date: Sat, 8 May 2021 02:54:56 +0100 Subject: cc_disk_setup.py: remove UDEVADM_CMD 
definition as not used (#886) UDEVADM_CMD is defined but not actually used in cc_disk_setup.py so remove it. Also modify the comment at top of read_parttbl function to remove the reference to udevadm which implies it is used to scan the partition table. --- cloudinit/config/cc_disk_setup.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index a582924b..22af3813 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -107,7 +107,6 @@ import shlex frequency = PER_INSTANCE # Define the commands to use -UDEVADM_CMD = subp.which('udevadm') SFDISK_CMD = subp.which("sfdisk") SGDISK_CMD = subp.which("sgdisk") LSBLK_CMD = subp.which("lsblk") @@ -686,9 +685,8 @@ def get_partition_layout(table_type, size, layout): def read_parttbl(device): """ - Use `partprobe` or `blkdev` instead of `udevadm`. `Partprobe` is - preferred over `blkdev` since it is more reliably able to probe - the partition table. + `Partprobe` is preferred over `blkdev` since it is more reliably + able to probe the partition table. """ if PARTPROBE_CMD is not None: probe_cmd = [PARTPROBE_CMD, device] -- cgit v1.2.3 From 899bfaa9d6bfab1db0df99257628ca1f6febff60 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 11 May 2021 11:34:29 -0500 Subject: Update test characters in substitution unit test (#893) In newer versions of python, when using urllib.parse, lines containing newline or tab characters now get sanitized. This caused a unit test to fail. See https://bugs.python.org/issue43882 --- cloudinit/distros/tests/test_init.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/tests/test_init.py b/cloudinit/distros/tests/test_init.py index db534654..fd64a322 100644 --- a/cloudinit/distros/tests/test_init.py +++ b/cloudinit/distros/tests/test_init.py @@ -11,10 +11,15 @@ import pytest from cloudinit.distros import _get_package_mirror_info, LDH_ASCII_CHARS +# In newer versions of Python, these characters will be omitted instead +# of substituted because of security concerns. +# See https://bugs.python.org/issue43882 +SECURITY_URL_CHARS = '\n\r\t' # Define a set of characters we would expect to be replaced INVALID_URL_CHARS = [ - chr(x) for x in range(127) if chr(x) not in LDH_ASCII_CHARS + chr(x) for x in range(127) + if chr(x) not in LDH_ASCII_CHARS + SECURITY_URL_CHARS ] for separator in [":", ".", "/", "#", "?", "@", "[", "]"]: # Remove from the set characters that either separate hostname parts (":", -- cgit v1.2.3 From 864346999702e6b2b8bf7e6244a6608bcead72a5 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 13 May 2021 12:55:41 -0500 Subject: Allow user control over update events (#834) Control is currently limited to boot events, though this should allow us to more easily incorporate HOTPLUG support. Disabling 'instance-first-boot' is not supported as we apply networking config too early in boot to have processed userdata (along with the fact that this would be a pretty big foot-gun). The concept of update events on datasource has been split into supported update events and default update events. Defaults will be used if there is no user-defined update events, but user-defined events won't be supplied if they aren't supported. When applying the networking config, we now check to see if the event is supported by the datasource as well as if it is enabled. 
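In user data this is expressed as a small cloud-config block. A minimal sketch
(the #cloud-config header is assumed here; the flattened one-line summary below
shows the same keys):

    #cloud-config
    updates:
      network:
        when: ['boot']
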
Configuration looks like: updates: network: when: ['boot'] --- cloudinit/event.py | 69 ++++++++++-- cloudinit/sources/DataSourceAzure.py | 15 ++- cloudinit/sources/DataSourceEc2.py | 10 +- cloudinit/sources/DataSourceRbxCloud.py | 9 +- cloudinit/sources/DataSourceScaleway.py | 10 +- cloudinit/sources/DataSourceSmartOS.py | 8 +- cloudinit/sources/__init__.py | 41 +++++--- cloudinit/sources/tests/test_init.py | 29 +++-- cloudinit/stages.py | 117 +++++++++++++++++---- cloudinit/tests/test_event.py | 26 +++++ cloudinit/tests/test_stages.py | 98 ++++++++++++++--- doc/rtd/index.rst | 1 + doc/rtd/topics/events.rst | 83 +++++++++++++++ .../integration_tests/modules/test_user_events.py | 95 +++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 4 +- tests/unittests/test_datasource/test_smartos.py | 10 +- tox.ini | 2 +- 17 files changed, 545 insertions(+), 82 deletions(-) create mode 100644 cloudinit/tests/test_event.py create mode 100644 doc/rtd/topics/events.rst create mode 100644 tests/integration_tests/modules/test_user_events.py (limited to 'cloudinit') diff --git a/cloudinit/event.py b/cloudinit/event.py index f7b311fb..76a0afc6 100644 --- a/cloudinit/event.py +++ b/cloudinit/event.py @@ -1,17 +1,72 @@ # This file is part of cloud-init. See LICENSE file for license information. - """Classes and functions related to event handling.""" +from enum import Enum +from typing import Dict, Set + +from cloudinit import log as logging + +LOG = logging.getLogger(__name__) + -# Event types which can generate maintenance requests for cloud-init. -class EventType(object): - BOOT = "System boot" - BOOT_NEW_INSTANCE = "New instance first boot" +class EventScope(Enum): + # NETWORK is currently the only scope, but we want to leave room to + # grow other scopes (e.g., STORAGE) without having to make breaking + # changes to the user config + NETWORK = 'network' - # TODO: Cloud-init will grow support for the follow event types: - # UDEV + def __str__(self): # pylint: disable=invalid-str-returned + return self.value + + +class EventType(Enum): + """Event types which can generate maintenance requests for cloud-init.""" + # Cloud-init should grow support for the follow event types: + # HOTPLUG # METADATA_CHANGE # USER_REQUEST + BOOT = "boot" + BOOT_NEW_INSTANCE = "boot-new-instance" + BOOT_LEGACY = "boot-legacy" + + def __str__(self): # pylint: disable=invalid-str-returned + return self.value + + +def userdata_to_events(user_config: dict) -> Dict[EventScope, Set[EventType]]: + """Convert userdata into update config format defined on datasource. + + Userdata is in the form of (e.g): + {'network': {'when': ['boot']}} + + DataSource config is in the form of: + {EventScope.Network: {EventType.BOOT}} + + Take the first and return the second + """ + update_config = {} + for scope, scope_list in user_config.items(): + try: + new_scope = EventScope(scope) + except ValueError as e: + LOG.warning( + "%s! Update data will be ignored for '%s' scope", + str(e), + scope, + ) + continue + try: + new_values = [EventType(x) for x in scope_list['when']] + except ValueError as e: + LOG.warning( + "%s! 
Update data will be ignored for '%s' scope", + str(e), + scope, + ) + new_values = [] + update_config[new_scope] = set(new_values) + + return update_config # vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 2f3390c3..dcdf9f8f 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,7 +22,7 @@ import requests from cloudinit import dmi from cloudinit import log as logging from cloudinit import net -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.net import device_driver from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources @@ -338,6 +338,13 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzure(sources.DataSource): dsname = 'Azure' + # Regenerate network config new_instance boot and every boot + default_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY + }} + _negotiated = False _metadata_imds = sources.UNSET _ci_pkl_version = 1 @@ -352,8 +359,6 @@ class DataSourceAzure(sources.DataSource): BUILTIN_DS_CONFIG]) self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file') self._network_config = None - # Regenerate network config new_instance boot and every boot - self.update_events['network'].add(EventType.BOOT) self._ephemeral_dhcp_ctx = None self.failed_desired_api_version = False self.iso_dev = None @@ -2309,8 +2314,8 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): LOG.info( 'Removing Ubuntu extended network scripts because' ' cloud-init updates Azure network configuration on the' - ' following event: %s.', - EventType.BOOT) + ' following events: %s.', + [EventType.BOOT.value, EventType.BOOT_LEGACY.value]) logged = True if os.path.isdir(path): util.del_dir(path) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index a2105dc7..8a7f7c60 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -8,6 +8,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. +import copy import os import time @@ -20,7 +21,7 @@ from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util from cloudinit import warnings -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType LOG = logging.getLogger(__name__) @@ -426,7 +427,12 @@ class DataSourceEc2(sources.DataSource): # Non-VPC (aka Classic) Ec2 instances need to rewrite the # network config file every boot due to MAC address change. 
if self.is_classic_instance(): - self.update_events['network'].add(EventType.BOOT) + self.default_update_events = copy.deepcopy( + self.default_update_events) + self.default_update_events[EventScope.NETWORK].add( + EventType.BOOT) + self.default_update_events[EventScope.NETWORK].add( + EventType.BOOT_LEGACY) else: LOG.warning("Metadata 'network' key not valid: %s.", net_md) self._network_config = result diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py index 0b8994bf..bb69e998 100644 --- a/cloudinit/sources/DataSourceRbxCloud.py +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -17,7 +17,7 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import subp from cloudinit import util -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType LOG = logging.getLogger(__name__) ETC_HOSTS = '/etc/hosts' @@ -206,10 +206,11 @@ def read_user_data_callback(mount_dir): class DataSourceRbxCloud(sources.DataSource): dsname = "RbxCloud" - update_events = {'network': [ + default_update_events = {EventScope.NETWORK: { EventType.BOOT_NEW_INSTANCE, - EventType.BOOT - ]} + EventType.BOOT, + EventType.BOOT_LEGACY + }} def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 41be7665..7b8974a2 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -31,8 +31,8 @@ from cloudinit import sources from cloudinit import url_helper from cloudinit import util from cloudinit import net +from cloudinit.event import EventScope, EventType from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError -from cloudinit.event import EventType LOG = logging.getLogger(__name__) @@ -172,7 +172,13 @@ def query_data_api(api_type, api_address, retries, timeout): class DataSourceScaleway(sources.DataSource): dsname = "Scaleway" - update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]} + default_update_events = { + EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY + } + } def __init__(self, sys_cfg, distro, paths): super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index fd292baa..9b16bf8d 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -36,7 +36,7 @@ from cloudinit import serial from cloudinit import sources from cloudinit import subp from cloudinit import util -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType LOG = logging.getLogger(__name__) @@ -170,6 +170,11 @@ class DataSourceSmartOS(sources.DataSource): smartos_type = sources.UNSET md_client = sources.UNSET + default_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY + }} def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -181,7 +186,6 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = {} self.network_data = None self._network_config = None - self.update_events['network'].add(EventType.BOOT) self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 7d74f8d9..a07c4b4f 100644 --- a/cloudinit/sources/__init__.py +++ 
b/cloudinit/sources/__init__.py @@ -13,6 +13,7 @@ import copy import json import os from collections import namedtuple +from typing import Dict, List from cloudinit import dmi from cloudinit import importer @@ -22,7 +23,7 @@ from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util from cloudinit.atomic_helper import write_json -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.filters import launch_index from cloudinit.persistence import CloudInitPickleMixin from cloudinit.reporting import events @@ -175,12 +176,23 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): # The datasource defines a set of supported EventTypes during which # the datasource can react to changes in metadata and regenerate - # network configuration on metadata changes. - # A datasource which supports writing network config on each system boot - # would call update_events['network'].add(EventType.BOOT). + # network configuration on metadata changes. These are defined in + # `supported_network_events`. + # The datasource also defines a set of default EventTypes that the + # datasource can react to. These are the event types that will be used + # if not overridden by the user. + # A datasource requiring to write network config on each system boot + # would call default_update_events['network'].add(EventType.BOOT). # Default: generate network config on new instance id (first boot). - update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])} + supported_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY, + }} + default_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + }} # N-tuple listing default values for any metadata-related class # attributes cached on an instance by a process_data runs. These attribute @@ -648,10 +660,12 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): def get_package_mirror_info(self): return self.distro.get_package_mirror_info(data_source=self) - def update_metadata(self, source_event_types): + def update_metadata_if_supported( + self, source_event_types: List[EventType] + ) -> bool: """Refresh cached metadata if the datasource supports this event. - The datasource has a list of update_events which + The datasource has a list of supported_update_events which trigger refreshing all cached metadata as well as refreshing the network configuration. @@ -661,9 +675,9 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): @return True if the datasource did successfully update cached metadata due to source_event_type. """ - supported_events = {} + supported_events = {} # type: Dict[EventScope, set] for event in source_event_types: - for update_scope, update_events in self.update_events.items(): + for update_scope, update_events in self.supported_update_events.items(): # noqa: E501 if event in update_events: if not supported_events.get(update_scope): supported_events[update_scope] = set() @@ -671,7 +685,8 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): for scope, matched_events in supported_events.items(): LOG.debug( "Update datasource metadata and %s config due to events: %s", - scope, ', '.join(matched_events)) + scope.value, + ', '.join([event.value for event in matched_events])) # Each datasource has a cached config property which needs clearing # Once cleared that config property will be regenerated from # current metadata. 
@@ -682,7 +697,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): if result: return True LOG.debug("Datasource %s not updated for events: %s", self, - ', '.join(source_event_types)) + ', '.join([event.value for event in source_event_types])) return False def check_instance_id(self, sys_cfg): @@ -789,7 +804,9 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): with myrep: LOG.debug("Seeing if we can get any data from %s", cls) s = cls(sys_cfg, distro, paths) - if s.update_metadata([EventType.BOOT_NEW_INSTANCE]): + if s.update_metadata_if_supported( + [EventType.BOOT_NEW_INSTANCE] + ): myrep.message = "found %s data from %s" % (mode, name) return (s, type_utils.obj_name(cls)) except Exception: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 1420a988..a2b052a6 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -5,7 +5,7 @@ import inspect import os import stat -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.helpers import Paths from cloudinit import importer from cloudinit.sources import ( @@ -618,24 +618,29 @@ class TestDataSource(CiTestCase): self.assertEqual('himom', getattr(self.datasource, cached_attr_name)) self.assertEqual('updated', self.datasource.myattr) + @mock.patch.dict(DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + @mock.patch.dict(DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) def test_update_metadata_only_acts_on_supported_update_events(self): - """update_metadata won't get_data on unsupported update events.""" - self.datasource.update_events['network'].discard(EventType.BOOT) + """update_metadata_if_supported wont get_data on unsupported events.""" self.assertEqual( - {'network': set([EventType.BOOT_NEW_INSTANCE])}, - self.datasource.update_events) + {EventScope.NETWORK: set([EventType.BOOT_NEW_INSTANCE])}, + self.datasource.default_update_events + ) def fake_get_data(): raise Exception('get_data should not be called') self.datasource.get_data = fake_get_data self.assertFalse( - self.datasource.update_metadata( + self.datasource.update_metadata_if_supported( source_event_types=[EventType.BOOT])) + @mock.patch.dict(DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) def test_update_metadata_returns_true_on_supported_update_event(self): - """update_metadata returns get_data response on supported events.""" - + """update_metadata_if_supported returns get_data on supported events""" def fake_get_data(): return True @@ -643,14 +648,16 @@ class TestDataSource(CiTestCase): self.datasource._network_config = 'something' self.datasource._dirty_cache = True self.assertTrue( - self.datasource.update_metadata( + self.datasource.update_metadata_if_supported( source_event_types=[ EventType.BOOT, EventType.BOOT_NEW_INSTANCE])) self.assertEqual(UNSET, self.datasource._network_config) + self.assertIn( "DEBUG: Update datasource metadata and network config due to" - " events: New instance first boot", - self.logs.getvalue()) + " events: boot-new-instance", + self.logs.getvalue() + ) class TestRedactSensitiveData(CiTestCase): diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5bacc85d..bbded1e9 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -8,9 +8,11 @@ import copy import os import pickle import sys +from collections import namedtuple +from typing 
import Dict, Set from cloudinit.settings import ( - FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, RUN_CLOUD_CONFIG) + FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, PER_ONCE, RUN_CLOUD_CONFIG) from cloudinit import handlers @@ -21,7 +23,11 @@ from cloudinit.handlers.jinja_template import JinjaTemplatePartHandler from cloudinit.handlers.shell_script import ShellScriptPartHandler from cloudinit.handlers.upstart_job import UpstartJobPartHandler -from cloudinit.event import EventType +from cloudinit.event import ( + EventScope, + EventType, + userdata_to_events, +) from cloudinit.sources import NetworkConfigSource from cloudinit import cloud @@ -118,6 +124,7 @@ class Init(object): def _initial_subdirs(self): c_dir = self.paths.cloud_dir + run_dir = self.paths.run_dir initial_dirs = [ c_dir, os.path.join(c_dir, 'scripts'), @@ -130,6 +137,7 @@ class Init(object): os.path.join(c_dir, 'handlers'), os.path.join(c_dir, 'sem'), os.path.join(c_dir, 'data'), + os.path.join(run_dir, 'sem'), ] return initial_dirs @@ -341,6 +349,11 @@ class Init(object): return self._previous_iid def is_new_instance(self): + """Return true if this is a new instance. + + If datasource has already been initialized, this will return False, + even on first boot. + """ previous = self.previous_iid() ret = (previous == NO_PREVIOUS_INSTANCE_ID or previous != self.datasource.get_instance_id()) @@ -702,6 +715,46 @@ class Init(object): return (self.distro.generate_fallback_config(), NetworkConfigSource.fallback) + def update_event_enabled( + self, event_source_type: EventType, scope: EventScope = None + ) -> bool: + """Determine if a particular EventType is enabled. + + For the `event_source_type` passed in, check whether this EventType + is enabled in the `updates` section of the userdata. If `updates` + is not enabled in userdata, check if defined as one of the + `default_events` on the datasource. `scope` may be used to + narrow the check to a particular `EventScope`. + + Note that on first boot, userdata may NOT be available yet. In this + case, we only have the data source's `default_update_events`, + so an event that should be enabled in userdata may be denied. 
+ """ + default_events = self.datasource.default_update_events # type: Dict[EventScope, Set[EventType]] # noqa: E501 + user_events = userdata_to_events(self.cfg.get('updates', {})) # type: Dict[EventScope, Set[EventType]] # noqa: E501 + # A value in the first will override a value in the second + allowed = util.mergemanydict([ + copy.deepcopy(user_events), + copy.deepcopy(default_events), + ]) + LOG.debug('Allowed events: %s', allowed) + + if not scope: + scopes = allowed.keys() + else: + scopes = [scope] + scope_values = [s.value for s in scopes] + + for evt_scope in scopes: + if event_source_type in allowed.get(evt_scope, []): + LOG.debug('Event Allowed: scope=%s EventType=%s', + evt_scope.value, event_source_type) + return True + + LOG.debug('Event Denied: scopes=%s EventType=%s', + scope_values, event_source_type) + return False + def _apply_netcfg_names(self, netcfg): try: LOG.debug("applying net config names for %s", netcfg) @@ -709,27 +762,51 @@ class Init(object): except Exception as e: LOG.warning("Failed to rename devices: %s", e) + def _get_per_boot_network_semaphore(self): + return namedtuple('Semaphore', 'semaphore args')( + helpers.FileSemaphores(self.paths.get_runpath('sem')), + ('apply_network_config', PER_ONCE) + ) + + def _network_already_configured(self) -> bool: + sem = self._get_per_boot_network_semaphore() + return sem.semaphore.has_run(*sem.args) + def apply_network_config(self, bring_up): - # get a network config + """Apply the network config. + + Find the config, determine whether to apply it, apply it via + the distro, and optionally bring it up + """ netcfg, src = self._find_networking_config() if netcfg is None: LOG.info("network config is disabled by %s", src) return - # request an update if needed/available - if self.datasource is not NULL_DATA_SOURCE: - if not self.is_new_instance(): - if not self.datasource.update_metadata([EventType.BOOT]): - LOG.debug( - "No network config applied. Neither a new instance" - " nor datasource network update on '%s' event", - EventType.BOOT) - # nothing new, but ensure proper names - self._apply_netcfg_names(netcfg) - return - else: - # refresh netcfg after update - netcfg, src = self._find_networking_config() + def event_enabled_and_metadata_updated(event_type): + return self.update_event_enabled( + event_type, scope=EventScope.NETWORK + ) and self.datasource.update_metadata_if_supported([event_type]) + + def should_run_on_boot_event(): + return (not self._network_already_configured() and + event_enabled_and_metadata_updated(EventType.BOOT)) + + if ( + self.datasource is not NULL_DATA_SOURCE and + not self.is_new_instance() and + not should_run_on_boot_event() and + not event_enabled_and_metadata_updated(EventType.BOOT_LEGACY) + ): + LOG.debug( + "No network config applied. 
Neither a new instance" + " nor datasource network update allowed") + # nothing new, but ensure proper names + self._apply_netcfg_names(netcfg) + return + + # refresh netcfg after update + netcfg, src = self._find_networking_config() # ensure all physical devices in config are present self.distro.networking.wait_for_physdevs(netcfg) @@ -740,8 +817,12 @@ class Init(object): # rendering config LOG.info("Applying network configuration from %s bringup=%s: %s", src, bring_up, netcfg) + + sem = self._get_per_boot_network_semaphore() try: - return self.distro.apply_network_config(netcfg, bring_up=bring_up) + with sem.semaphore.lock(*sem.args): + return self.distro.apply_network_config( + netcfg, bring_up=bring_up) except net.RendererNotFoundError as e: LOG.error("Unable to render networking. Network config is " "likely broken: %s", e) diff --git a/cloudinit/tests/test_event.py b/cloudinit/tests/test_event.py new file mode 100644 index 00000000..3da4c70c --- /dev/null +++ b/cloudinit/tests/test_event.py @@ -0,0 +1,26 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Tests related to cloudinit.event module.""" +from cloudinit.event import EventType, EventScope, userdata_to_events + + +class TestEvent: + def test_userdata_to_events(self): + userdata = {'network': {'when': ['boot']}} + expected = {EventScope.NETWORK: {EventType.BOOT}} + assert expected == userdata_to_events(userdata) + + def test_invalid_scope(self, caplog): + userdata = {'networkasdfasdf': {'when': ['boot']}} + userdata_to_events(userdata) + assert ( + "'networkasdfasdf' is not a valid EventScope! Update data " + "will be ignored for 'networkasdfasdf' scope" + ) in caplog.text + + def test_invalid_event(self, caplog): + userdata = {'network': {'when': ['bootasdfasdf']}} + userdata_to_events(userdata) + assert ( + "'bootasdfasdf' is not a valid EventType! Update data " + "will be ignored for 'network' scope" + ) in caplog.text diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index d2d1b37f..a06a2bde 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -1,7 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. 
"""Tests related to cloudinit.stages module.""" - import os import stat @@ -11,7 +10,7 @@ from cloudinit import stages from cloudinit import sources from cloudinit.sources import NetworkConfigSource -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.util import write_file from cloudinit.tests.helpers import CiTestCase, mock @@ -52,6 +51,8 @@ class TestInit(CiTestCase): 'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir, 'run_dir': self.tmpdir}}} self.init.datasource = FakeDataSource(paths=self.init.paths) + self._real_is_new_instance = self.init.is_new_instance + self.init.is_new_instance = mock.Mock(return_value=True) def test_wb__find_networking_config_disabled(self): """find_networking_config returns no config when disabled.""" @@ -291,6 +292,7 @@ class TestInit(CiTestCase): m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} self.init._find_networking_config = fake_network_config + self.init.apply_network_config(True) self.init.distro.apply_network_config_names.assert_called_with(net_cfg) self.init.distro.apply_network_config.assert_called_with( @@ -299,6 +301,7 @@ class TestInit(CiTestCase): @mock.patch('cloudinit.distros.ubuntu.Distro') def test_apply_network_on_same_instance_id(self, m_ubuntu): """Only call distro.apply_network_config_names on same instance id.""" + self.init.is_new_instance = self._real_is_new_instance old_instance_id = os.path.join( self.init.paths.get_cpath('data'), 'instance-id') write_file(old_instance_id, TEST_INSTANCE_ID) @@ -311,18 +314,19 @@ class TestInit(CiTestCase): return net_cfg, NetworkConfigSource.fallback self.init._find_networking_config = fake_network_config + self.init.apply_network_config(True) self.init.distro.apply_network_config_names.assert_called_with(net_cfg) self.init.distro.apply_network_config.assert_not_called() - self.assertIn( - 'No network config applied. Neither a new instance' - " nor datasource network update on '%s' event" % EventType.BOOT, - self.logs.getvalue()) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.distros.ubuntu.Distro') - def test_apply_network_on_datasource_allowed_event(self, m_ubuntu, m_macs): - """Apply network if datasource.update_metadata permits BOOT event.""" + assert ( + "No network config applied. 
Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + # CiTestCase doesn't work with pytest.mark.parametrize, and moving this + # functionality to a separate class is more cumbersome than it'd be worth + # at the moment, so use this as a simple setup + def _apply_network_setup(self, m_macs): old_instance_id = os.path.join( self.init.paths.get_cpath('data'), 'instance-id') write_file(old_instance_id, TEST_INSTANCE_ID) @@ -338,12 +342,80 @@ class TestInit(CiTestCase): self.init._find_networking_config = fake_network_config self.init.datasource = FakeDataSource(paths=self.init.paths) - self.init.datasource.update_events = {'network': [EventType.BOOT]} + self.init.is_new_instance = mock.Mock(return_value=False) + return net_cfg + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}) + def test_apply_network_allowed_when_default_boot( + self, m_ubuntu, m_macs + ): + """Apply network if datasource permits BOOT event.""" + net_cfg = self._apply_network_setup(m_macs) + self.init.apply_network_config(True) - self.init.distro.apply_network_config_names.assert_called_with(net_cfg) + assert mock.call( + net_cfg + ) == self.init.distro.apply_network_config_names.call_args_list[-1] + assert mock.call( + net_cfg, bring_up=True + ) == self.init.distro.apply_network_config.call_args_list[-1] + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_disabled_when_no_default_boot( + self, m_ubuntu, m_macs + ): + """Don't apply network if datasource has no BOOT event.""" + self._apply_network_setup(m_macs) + self.init.apply_network_config(True) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_allowed_with_userdata_overrides( + self, m_ubuntu, m_macs + ): + """Apply network if userdata overrides default config""" + net_cfg = self._apply_network_setup(m_macs) + self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init.apply_network_config(True) + self.init.distro.apply_network_config_names.assert_called_with( + net_cfg) self.init.distro.apply_network_config.assert_called_with( net_cfg, bring_up=True) + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_disabled_when_unsupported( + self, m_ubuntu, m_macs + ): + """Don't apply network config if unsupported. + + Shouldn't work even when specified as userdata + """ + self._apply_network_setup(m_macs) + + self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init.apply_network_config(True) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. 
Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + class TestInit_InitializeFilesystem: """Tests for cloudinit.stages.Init._initialize_filesystem. diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index 10e8228f..33c6b56a 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -49,6 +49,7 @@ Having trouble? We would like to help! topics/format.rst topics/examples.rst + topics/events.rst topics/modules.rst topics/merging.rst diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst new file mode 100644 index 00000000..463208cc --- /dev/null +++ b/doc/rtd/topics/events.rst @@ -0,0 +1,83 @@ +.. _events: + +****************** +Events and Updates +****************** + +Events +====== + +`Cloud-init`_ will fetch and apply cloud and user data configuration +upon several event types. The two most common events for cloud-init +are when an instance first boots and any subsequent boot thereafter (reboot). +In addition to boot events, cloud-init users and vendors are interested +in when devices are added. cloud-init currently supports the following +event types: + +- **BOOT_NEW_INSTANCE**: New instance first boot +- **BOOT**: Any system boot other than 'BOOT_NEW_INSTANCE' +- **BOOT_LEGACY**: Similar to 'BOOT', but applies networking config twice each + boot: once during Local stage, then again in Network stage. As this behavior + was previously the default behavior, this option exists to prevent regressing + such behavior. + +Future work will likely include infrastructure and support for the following +events: + +- **HOTPLUG**: Dynamic add of a system device +- **METADATA_CHANGE**: An instance's metadata has change +- **USER_REQUEST**: Directed request to update + +Datasource Event Support +======================== + +All :ref:`datasources` by default support the ``BOOT_NEW_INSTANCE`` event. +Each Datasource will declare a set of these events that it is capable of +handling. Datasources may not support all event types. In some cases a system +may be configured to allow a particular event but may be running on +a platform whose datasource cannot support the event. + +Configuring Event Updates +========================= + +Update configuration may be specified via user data, +which can be used to enable or disable handling of specific events. +This configuration will be honored as long as the events are supported by +the datasource. However, configuration will always be applied at first +boot, regardless of the user data specified. + +Updates +~~~~~~~ +Update policy configuration defines which +events are allowed to be handled. This is separate from whether a +particular platform or datasource has the capability for such events. + +**scope**: ** + +The ``scope`` value is a string which defines under which domain does the +event occur. Currently the only one known scope is ``network``, though more +scopes may be added in the future. Scopes are defined by convention but +arbitrary values can be used. + +**when**: ** + +Each ``scope`` requires a ``when`` element to specify which events +are to allowed to be handled. + + +Examples +======== + +apply network config every boot +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +On every boot, apply network configuration found in the datasource. + +.. code-block:: shell-session + + # apply network config on every boot + updates: + network: + when: ['boot'] + +.. _Cloud-init: https://launchpad.net/cloud-init +.. 
vi: textwidth=78 diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py new file mode 100644 index 00000000..a45cad72 --- /dev/null +++ b/tests/integration_tests/modules/test_user_events.py @@ -0,0 +1,95 @@ +"""Test user-overridable events. + +This is currently limited to applying network config on BOOT events. +""" + +import pytest +import re +import yaml + +from tests.integration_tests.instances import IntegrationInstance + + +def _add_dummy_bridge_to_netplan(client: IntegrationInstance): + # Update netplan configuration to ensure it doesn't change on reboot + netplan = yaml.safe_load( + client.execute('cat /etc/netplan/50-cloud-init.yaml') + ) + # Just a dummy bridge to do nothing + try: + netplan['network']['bridges']['dummy0'] = {'dhcp4': False} + except KeyError: + netplan['network']['bridges'] = {'dummy0': {'dhcp4': False}} + + dumped_netplan = yaml.dump(netplan) + client.write_to_file('/etc/netplan/50-cloud-init.yaml', dumped_netplan) + + +@pytest.mark.lxd_container +@pytest.mark.lxd_vm +@pytest.mark.ec2 +@pytest.mark.gce +@pytest.mark.oci +@pytest.mark.openstack +@pytest.mark.not_xenial +def test_boot_event_disabled_by_default(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Applying network configuration' in log + assert 'dummy0' not in client.execute('ls /sys/class/net') + + _add_dummy_bridge_to_netplan(client) + client.execute('rm /var/log/cloud-init.log') + + client.restart() + log2 = client.read_from_file('/var/log/cloud-init.log') + + # We attempt to apply network config twice on every boot. + # Ensure neither time works. + assert 2 == len( + re.findall(r"Event Denied: scopes=\['network'\] EventType=boot[^-]", + log2) + ) + assert 2 == log2.count( + "Event Denied: scopes=['network'] EventType=boot-legacy" + ) + assert 2 == log2.count( + "No network config applied. 
Neither a new instance" + " nor datasource network update allowed" + ) + + assert 'dummy0' in client.execute('ls /sys/class/net') + + +def _test_network_config_applied_on_reboot(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Applying network configuration' in log + assert 'dummy0' not in client.execute('ls /sys/class/net') + + _add_dummy_bridge_to_netplan(client) + client.execute('rm /var/log/cloud-init.log') + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + + assert 'Event Allowed: scope=network EventType=boot' in log + assert 'Applying network configuration' in log + assert 'dummy0' not in client.execute('ls /sys/class/net') + + +@pytest.mark.azure +@pytest.mark.not_xenial +def test_boot_event_enabled_by_default(client: IntegrationInstance): + _test_network_config_applied_on_reboot(client) + + +USER_DATA = """\ +#cloud-config +updates: + network: + when: [boot] +""" + + +@pytest.mark.not_xenial +@pytest.mark.user_data(USER_DATA) +def test_boot_event_enabled(client: IntegrationInstance): + _test_network_config_applied_on_reboot(client) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 742d1faa..54e06119 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -3163,8 +3163,8 @@ class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): expected_logs = [ 'INFO: Removing Ubuntu extended network scripts because cloud-init' - ' updates Azure network configuration on the following event:' - ' System boot.', + ' updates Azure network configuration on the following events:' + " ['boot', 'boot-legacy']", 'Recursively deleting %s' % subdir, 'Attempting to remove %s' % file1] for log in expected_logs: diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 5847a384..9c499672 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -29,7 +29,7 @@ from cloudinit.sources.DataSourceSmartOS import ( convert_smartos_network_data as convert_net, SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, identify_file) -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit import helpers as c_helpers from cloudinit.util import (b64e, write_file) @@ -653,8 +653,12 @@ class TestSmartOSDataSource(FilesystemMockingTestCase): def test_reconfig_network_on_boot(self): # Test to ensure that network is configured from metadata on each boot dsrc = self._get_ds(mockdata=MOCK_RETURNS) - self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]), - dsrc.update_events['network']) + self.assertSetEqual( + {EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY}, + dsrc.default_update_events[EventScope.NETWORK] + ) class TestIdentifyFile(CiTestCase): diff --git a/tox.ini b/tox.ini index bf8cb78b..a2981b98 100644 --- a/tox.ini +++ b/tox.ini @@ -174,7 +174,7 @@ markers = gce: test will only run on GCE platform azure: test will only run on Azure platform oci: test will only run on OCI platform - openstack: test will only run on openstack + openstack: test will only run on openstack platform lxd_config_dict: set the config_dict passed on LXD instance creation lxd_container: test will only run in LXD container lxd_use_exec: `execute` will use `lxc exec` instead of SSH -- cgit v1.2.3 From 57964125205790f7803dbebfadc6c52ed195c6bf Mon Sep 17 
00:00:00 2001 From: James Falcon Date: Fri, 14 May 2021 12:48:52 -0500 Subject: Fix unit tests breaking from new httpretty version (#903) httpretty now logs all requests by default which gets mixed up with our logging tests. Also we were incorrectly setting a logging level to 'None', which now also causes issues with the new httpretty version. See https://github.com/gabrielfalcao/HTTPretty/pull/419 --- cloudinit/tests/helpers.py | 5 ++++- cloudinit/tests/test_url_helper.py | 4 ++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 58f63b69..ccd56793 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -171,7 +171,7 @@ class CiTestCase(TestCase): if self.with_logs: # Remove the handler we setup logging.getLogger().handlers = self.old_handlers - logging.getLogger().level = None + logging.getLogger().setLevel(logging.NOTSET) subp.subp = _real_subp super(CiTestCase, self).tearDown() @@ -360,6 +360,9 @@ class HttprettyTestCase(CiTestCase): httpretty.HTTPretty.allow_net_connect = False httpretty.reset() httpretty.enable() + # Stop the logging from HttpPretty so our logs don't get mixed + # up with its logs + logging.getLogger('httpretty.core').setLevel(logging.CRITICAL) def tearDown(self): httpretty.disable() diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index 364ec822..c3918f80 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -8,6 +8,7 @@ from cloudinit import util from cloudinit import version import httpretty +import logging import requests @@ -81,6 +82,9 @@ class TestReadFileOrUrl(CiTestCase): url = 'http://hostname/path' headers = {'sensitive': 'sekret', 'server': 'blah'} httpretty.register_uri(httpretty.GET, url) + # By default, httpretty will log our request along with the header, + # so if we don't change this the secret will show up in the logs + logging.getLogger('httpretty.core').setLevel(logging.CRITICAL) read_file_or_url(url, headers=headers, headers_redact=['sensitive']) logs = self.logs.getvalue() -- cgit v1.2.3 From 6fe1983777663a1a1136fd73dc50244f2d030be8 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Fri, 14 May 2021 15:58:47 -0400 Subject: BSD: static network, set the mtu (#894) In the case of a static network, we now set the MTU according to the meta-data. 
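As a hedged illustration of the new renderer behavior — a sketch only, where the
subnet dict shape matches what cloud-init's network_state hands the BSD renderers
and the vtnet0 device name is a made-up example:

    # Sketch mirroring the freebsd.py hunk below: append 'mtu N' only when set.
    subnet = {'address': '192.168.1.10', 'netmask': '255.255.255.0', 'mtu': 9000}

    net_config = subnet.get('address') + ' netmask ' + subnet.get('netmask')
    mtu = subnet.get('mtu')
    if mtu:
        net_config += (' mtu %d' % mtu)

    # rc.conf would then carry, e.g. (hypothetical device name):
    #   ifconfig_vtnet0="192.168.1.10 netmask 255.255.255.0 mtu 9000"
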
--- cloudinit/net/bsd.py | 1 + cloudinit/net/freebsd.py | 11 ++++++----- cloudinit/net/netbsd.py | 8 +++++--- cloudinit/net/openbsd.py | 6 +++++- 4 files changed, 17 insertions(+), 9 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py index e34e0454..aab968a8 100644 --- a/cloudinit/net/bsd.py +++ b/cloudinit/net/bsd.py @@ -76,6 +76,7 @@ class BSDRenderer(renderer.Renderer): self.interface_configurations[device_name] = { 'address': subnet.get('address'), 'netmask': subnet.get('netmask'), + 'mtu': subnet.get('mtu'), } def _route_entries(self, settings, target=None): diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py index 0285dfec..c843d792 100644 --- a/cloudinit/net/freebsd.py +++ b/cloudinit/net/freebsd.py @@ -19,12 +19,13 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): def write_config(self): for device_name, v in self.interface_configurations.items(): + net_config = 'DHCP' if isinstance(v, dict): - self.set_rc_config_value( - 'ifconfig_' + device_name, - v.get('address') + ' netmask ' + v.get('netmask')) - else: - self.set_rc_config_value('ifconfig_' + device_name, 'DHCP') + net_config = v.get('address') + ' netmask ' + v.get('netmask') + mtu = v.get('mtu') + if mtu: + net_config += (' mtu %d' % mtu) + self.set_rc_config_value('ifconfig_' + device_name, net_config) def start_services(self, run=False): if not run: diff --git a/cloudinit/net/netbsd.py b/cloudinit/net/netbsd.py index 71b38ee6..5f8881a5 100644 --- a/cloudinit/net/netbsd.py +++ b/cloudinit/net/netbsd.py @@ -22,9 +22,11 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): ) for device_name, v in self.interface_configurations.items(): if isinstance(v, dict): - self.set_rc_config_value( - 'ifconfig_' + device_name, - v.get('address') + ' netmask ' + v.get('netmask')) + net_config = v.get('address') + ' netmask ' + v.get('netmask') + mtu = v.get('mtu') + if mtu: + net_config += (' mtu %d' % mtu) + self.set_rc_config_value('ifconfig_' + device_name, net_config) def start_services(self, run=False): if not run: diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py index 166d77e6..9ec7ee9e 100644 --- a/cloudinit/net/openbsd.py +++ b/cloudinit/net/openbsd.py @@ -18,7 +18,7 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): content = 'dhcp\n' elif isinstance(v, dict): try: - content = "inet {address} {netmask}\n".format( + content = "inet {address} {netmask}".format( address=v['address'], netmask=v['netmask'] ) @@ -26,6 +26,10 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): LOG.error( "Invalid static configuration for %s", device_name) + mtu = v.get("mtu") + if mtu: + content += (' mtu %d' % mtu) + content += "\n" util.write_file(fn, content) def start_services(self, run=False): -- cgit v1.2.3 From 1793b8b70ca2e3587c271155033ef943207136ae Mon Sep 17 00:00:00 2001 From: Paul Goins Date: Tue, 18 May 2021 17:02:51 +0000 Subject: Added support for importing keys via primary/security mirror clauses (#882) Presently, mirror keys cannot be associated with primary/security mirrors. Unfortunately, this prevents use of Landscape-managed package mirrors as the mirror key for the Landscape-hosted repository cannot be provided. This patch allows the same key-related fields usable on "sources" entries to be used on the "primary" and "security" entries as well. 
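For example, a user-data sketch of the new fields (the mirror URI and key id
below are placeholders, not values from this patch):

    #cloud-config
    apt:
      primary:
        - arches: [amd64]
          uri: http://mirror.example.com/ubuntu/
          keyid: 0123456789ABCDEF  # hypothetical fingerprint/shortid
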
LP: #1925395 --- cloudinit/config/cc_apt_configure.py | 26 ++++++++++++++++++++++ doc/examples/cloud-config-apt.txt | 6 +++++ .../test_handler/test_handler_apt_source_v3.py | 23 +++++++++++++++++++ 3 files changed, 55 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index bb8a1278..0c9c7925 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -57,6 +57,15 @@ mirror_property = { }, 'search_dns': { 'type': 'boolean', + }, + 'keyid': { + 'type': 'string' + }, + 'key': { + 'type': 'string' + }, + 'keyserver': { + 'type': 'string' } } } @@ -228,6 +237,15 @@ schema = { key, the search pattern will be ``-security-mirror``. + Each mirror may also specify a key to import via + any of the following optional keys: + + - ``keyid``: a key to import via shortid or \ + fingerprint. + - ``key``: a raw PGP key. + - ``keyserver``: alternate keyserver to pull \ + ``keyid`` key from. + If no mirrors are specified, or all lookups fail, then default mirrors defined in the datasource are used. If none are present in the datasource @@ -453,6 +471,7 @@ def apply_apt(cfg, cloud, target): LOG.debug("Apt Mirror info: %s", mirrors) if util.is_false(cfg.get('preserve_sources_list', False)): + add_mirror_keys(cfg, target) generate_sources_list(cfg, release, mirrors, cloud) rename_apt_lists(mirrors, target, arch) @@ -660,6 +679,13 @@ def disable_suites(disabled, src, release): return retsrc +def add_mirror_keys(cfg, target): + """Adds any keys included in the primary/security mirror clauses""" + for key in ('primary', 'security'): + for mirror in cfg.get(key, []): + add_apt_key(mirror, target) + + def generate_sources_list(cfg, release, mirrors, cloud): """generate_sources_list create a source.list file based on a custom or default template diff --git a/doc/examples/cloud-config-apt.txt b/doc/examples/cloud-config-apt.txt index 004894b7..f4392326 100644 --- a/doc/examples/cloud-config-apt.txt +++ b/doc/examples/cloud-config-apt.txt @@ -138,6 +138,12 @@ apt: # the first defining a valid mirror wins (in the order as defined here, # not the order as listed in the config). 
# + # Additionally, if the repository requires a custom signing key, it can be + # specified via the same fields as for custom sources: + # 'keyid': providing a key to import via shortid or fingerprint + # 'key': providing a raw PGP key + # 'keyserver': specify an alternate keyserver to pull keys from that + # were specified by keyid - arches: [s390x, arm64] # as above, allowing to have one config for different per arch mirrors # security is optional, if not defined it is set to the same value as primary diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index ac847238..abb0a9b6 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -1009,6 +1009,29 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") self.assertEqual(mirrors['SECURITY'], smir) + def test_apt_v3_add_mirror_keys(self): + """test_apt_v3_add_mirror_keys - Test adding key for mirrors""" + arch = 'amd64' + cfg = { + 'primary': [ + {'arches': [arch], + 'uri': 'http://test.ubuntu.com/', + 'key': 'fakekey_primary'}], + 'security': [ + {'arches': [arch], + 'uri': 'http://testsec.ubuntu.com/', + 'key': 'fakekey_security'}] + } + + with mock.patch.object(cc_apt_configure, + 'add_apt_key_raw') as mockadd: + cc_apt_configure.add_mirror_keys(cfg, TARGET) + calls = [ + mock.call('fakekey_primary', TARGET), + mock.call('fakekey_security', TARGET), + ] + mockadd.assert_has_calls(calls, any_order=True) + class TestDebconfSelections(TestCase): -- cgit v1.2.3 From 7c1d27b8836c266f6db1e179b98ef32effeb750e Mon Sep 17 00:00:00 2001 From: Louis Abel Date: Tue, 25 May 2021 06:25:41 -0700 Subject: Add Rocky Linux support to cloud-init (#906) Rocky Linux is a RHEL-compatible distribution so all changes that have been made should be trivial. --- README.md | 2 +- cloudinit/config/cc_ntp.py | 2 +- cloudinit/config/cc_yum_add_repo.py | 4 ++-- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/rocky.py | 9 +++++++++ cloudinit/net/sysconfig.py | 2 +- cloudinit/tests/.test_util.py.swp | Bin 0 -> 16384 bytes cloudinit/tests/test_util.py | 35 ++++++++++++++++++++++++++++++++++- cloudinit/util.py | 2 +- config/cloud.cfg.tmpl | 7 ++++--- systemd/cloud-init-generator.tmpl | 2 +- systemd/cloud-init.service.tmpl | 2 +- tests/unittests/test_cli.py | 3 ++- tools/.github-cla-signers | 1 + tools/render-cloudcfg | 4 ++-- 15 files changed, 61 insertions(+), 16 deletions(-) create mode 100644 cloudinit/distros/rocky.py create mode 100644 cloudinit/tests/.test_util.py.swp (limited to 'cloudinit') diff --git a/README.md b/README.md index 01fd3b07..bf232eba 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> |
+| Alpine Linux<br />ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />
| ## To start developing cloud-init diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 41c278ff..70c24610 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -25,7 +25,7 @@ frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse', - 'rhel', 'sles', 'ubuntu'] + 'rhel', 'rocky', 'sles', 'ubuntu'] NTP_CLIENT_CONFIG = { 'chrony': { diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index db513ed7..7daa6bd9 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -18,7 +18,7 @@ entry, the config entry will be skipped. **Module frequency:** per always -**Supported distros:** almalinux, centos, fedora, rhel +**Supported distros:** almalinux, centos, fedora, rhel, rocky **Config keys**:: @@ -36,7 +36,7 @@ from configparser import ConfigParser from cloudinit import util -distros = ['almalinux', 'centos', 'fedora', 'rhel'] +distros = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky'] def _canonicalize_id(repo_id): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 107b928c..57e33621 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -46,7 +46,7 @@ OSFAMILIES = { 'debian': ['debian', 'ubuntu'], 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], - 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'rhel'], + 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'rhel', 'rocky'], 'suse': ['opensuse', 'sles'], } diff --git a/cloudinit/distros/rocky.py b/cloudinit/distros/rocky.py new file mode 100644 index 00000000..edb3165d --- /dev/null +++ b/cloudinit/distros/rocky.py @@ -0,0 +1,9 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.distros import rhel + + +class Distro(rhel.Distro): + pass + +# vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 089b44b2..3a433c99 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -19,7 +19,7 @@ from .network_state import ( LOG = logging.getLogger(__name__) NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" -KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'suse'] +KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky', 'suse'] def _make_header(sep='#'): diff --git a/cloudinit/tests/.test_util.py.swp b/cloudinit/tests/.test_util.py.swp new file mode 100644 index 00000000..78ef5865 Binary files /dev/null and b/cloudinit/tests/.test_util.py.swp differ diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index a4c02877..f9bc31be 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -124,6 +124,22 @@ OS_RELEASE_ALMALINUX_8 = dedent("""\ ALMALINUX_MANTISBT_PROJECT_VERSION="8.3" """) +OS_RELEASE_ROCKY_8 = dedent("""\ + NAME="Rocky Linux" + VERSION="8.3 (Green Obsidian)" + ID="rocky" + ID_LIKE="rhel fedora" + VERSION_ID="8.3" + PLATFORM_ID="platform:el8" + PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:rocky:rocky:8" + HOME_URL="https://rockylinux.org/" + BUG_REPORT_URL="https://bugs.rockylinux.org/" + ROCKY_SUPPORT_PRODUCT="Rocky Linux" + ROCKY_SUPPORT_PRODUCT_VERSION="8" +""") + REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)" REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)" REDHAT_RELEASE_REDHAT_6 = ( @@ -132,7 +148,8 @@ REDHAT_RELEASE_REDHAT_7 = ( "Red Hat Enterprise Linux Server release 7.5 (Maipo)") REDHAT_RELEASE_ALMALINUX_8 = ( "AlmaLinux release 8.3 (Purple Manul)") - +REDHAT_RELEASE_ROCKY_8 = ( + "Rocky Linux release 8.3 (Green Obsidian)") OS_RELEASE_DEBIAN = dedent("""\ PRETTY_NAME="Debian GNU/Linux 9 (stretch)" @@ -537,6 +554,22 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): + """Verify rocky linux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_ROCKY_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): + """Verify rocky linux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_ROCKY_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_debian(self, m_os_release, m_path_exists): """Verify we get the correct name and release name on Debian.""" diff --git a/cloudinit/util.py b/cloudinit/util.py index fdea1181..2de1123e 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -522,7 +522,7 @@ def system_info(): linux_dist = info['dist'][0].lower() if linux_dist in ( 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora', - 'rhel', 'suse'): + 'rhel', 'rocky', 'suse'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 
8656daa7..2f6c3a7d 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -21,7 +21,8 @@ disable_root: false disable_root: true {% endif %} -{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora", "rhel"] %} +{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora", + "rhel", "rocky"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} resize_rootfs: noblock @@ -154,7 +155,7 @@ cloud_final_modules: system_info: # This will affect which distro class gets used {% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian", - "fedora", "freebsd", "netbsd", "openbsd", "rhel", + "fedora", "freebsd", "netbsd", "openbsd", "rhel", "rocky", "suse", "ubuntu"] %} distro: {{ variant }} {% else %} @@ -207,7 +208,7 @@ system_info: security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh {% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "fedora", - "rhel", "suse"] %} + "rhel", "rocky", "suse"] %} # Default user name + that default users groups (if added/used) default_user: {% if variant == "amazon" %} diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 9b103ef9..0713db16 100755 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -83,7 +83,7 @@ default() { check_for_datasource() { local ds_rc="" -{% if variant in ["almalinux", "rhel", "fedora", "centos"] %} +{% if variant in ["almalinux", "rhel", "fedora", "centos", "rocky"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} local dsidentify="/usr/lib/cloud-init/ds-identify" diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index a5c51277..4da1a905 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -10,7 +10,7 @@ After=systemd-networkd-wait-online.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=networking.service {% endif %} -{% if variant in ["almalinux", "centos", "fedora", "rhel"] %} +{% if variant in ["almalinux", "centos", "fedora", "rhel", "rocky"] %} After=network.service After=NetworkManager.service {% endif %} diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index f5cf514d..fbc6ec11 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -224,7 +224,8 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all']) expected_doc_sections = [ '**Supported distros:** all', - '**Supported distros:** almalinux, alpine, centos, debian, fedora', + ('**Supported distros:** almalinux, alpine, centos, debian, ' + 'fedora, opensuse, rhel, rocky, sles, ubuntu'), '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' ] diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 6f626643..14916d31 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -35,6 +35,7 @@ manuelisimo marlluslustosa matthewruffell mitechie +nazunalika nicolasbock nishigori olivierlemasle diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index f5990748..9ec554bd 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -5,8 +5,8 @@ import os import sys VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian", - "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "ubuntu", - "unknown"] + "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "rocky", + "ubuntu", 
"unknown"] if "avoid-pep8-E402-import-not-top-of-file": -- cgit v1.2.3 From fc161f8e3e35883637ef12b27bfb1eacbfce6277 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Tue, 25 May 2021 15:48:07 -0400 Subject: openbsd/net: flush the route table on net restart (#908) Ensure we've got a clean environment before we restart the network. In some cases, the `sh /etc/netstart` is not enough to restart the network. A previous default route remains in the route table and as a result the network is broken. Also `sh /netstart` does not kill `dhclient`. The problen happens for instance with OVH OpenStack SBG3. --- cloudinit/net/openbsd.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/net/openbsd.py b/cloudinit/net/openbsd.py index 9ec7ee9e..d87d8a4f 100644 --- a/cloudinit/net/openbsd.py +++ b/cloudinit/net/openbsd.py @@ -36,6 +36,9 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): if not self._postcmds: LOG.debug("openbsd generate postcmd disabled") return + subp.subp(['pkill', 'dhclient'], capture=True, rcs=[0, 1]) + subp.subp(['route', 'del', 'default'], capture=True, rcs=[0, 1]) + subp.subp(['route', 'flush', 'default'], capture=True, rcs=[0, 1]) subp.subp(['sh', '/etc/netstart'], capture=True) def set_route(self, network, netmask, gateway): -- cgit v1.2.3 From 503e2d398660e8af5d49bdf6944a50ad793a3a31 Mon Sep 17 00:00:00 2001 From: eb3095 <45504889+eb3095@users.noreply.github.com> Date: Tue, 1 Jun 2021 18:30:00 -0400 Subject: Allow braces to appear in dhclient output (#911) dhclient output that contains brackets for pxe variables will break the dhclient parsing regex line. This fix retains the current functionality while fixing this particular issue. --- cloudinit/net/dhcp.py | 2 +- cloudinit/net/tests/test_dhcp.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 4394c68b..9b94c9a0 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -173,7 +173,7 @@ def parse_dhcp_lease_file(lease_file): @raises: InvalidDHCPLeaseFileError on empty of unparseable leasefile content. 
""" - lease_regex = re.compile(r"lease {(?P[^}]*)}\n") + lease_regex = re.compile(r"lease {(?P.*?)}\n", re.DOTALL) dhcp_leases = [] lease_content = util.load_file(lease_file) if len(lease_content) == 0: diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index 6f9a02de..5ae048e2 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -42,6 +42,7 @@ class TestParseDHCPLeasesFile(CiTestCase): lease { interface "wlp3s0"; fixed-address 192.168.2.74; + filename "http://192.168.2.50/boot.php?mac=${netX}"; option subnet-mask 255.255.255.0; option routers 192.168.2.1; renew 4 2017/07/27 18:02:30; @@ -50,6 +51,7 @@ class TestParseDHCPLeasesFile(CiTestCase): lease { interface "wlp3s0"; fixed-address 192.168.2.74; + filename "http://192.168.2.50/boot.php?mac=${netX}"; option subnet-mask 255.255.255.0; option routers 192.168.2.1; } @@ -58,8 +60,10 @@ class TestParseDHCPLeasesFile(CiTestCase): {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1', 'renew': '4 2017/07/27 18:02:30', - 'expire': '5 2017/07/28 07:08:15'}, + 'expire': '5 2017/07/28 07:08:15', + 'filename': 'http://192.168.2.50/boot.php?mac=${netX}'}, {'interface': 'wlp3s0', 'fixed-address': '192.168.2.74', + 'filename': 'http://192.168.2.50/boot.php?mac=${netX}', 'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}] write_file(lease_file, content) self.assertCountEqual(expected, parse_dhcp_lease_file(lease_file)) -- cgit v1.2.3 From 29ac50f2b9e7634fc59fc161d77d27e970ae8080 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Wed, 2 Jun 2021 17:10:32 -0400 Subject: - Create the log file with 640 permissions (#858) Security scanners are often simple minded and complain on arbitrary settings such as file permissions. For /var/log/* having world read is one of these cases. --- cloudinit/stages.py | 2 +- cloudinit/tests/test_stages.py | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/stages.py b/cloudinit/stages.py index bbded1e9..3688be2e 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -156,7 +156,7 @@ class Init(object): util.ensure_dirs(self._initial_subdirs()) log_file = util.get_cfg_option_str(self.cfg, 'def_log_file') if log_file: - util.ensure_file(log_file, preserve_mode=True) + util.ensure_file(log_file, mode=0o640, preserve_mode=True) perms = self.cfg.get('syslog_fix_perms') if not perms: perms = {} diff --git a/cloudinit/tests/test_stages.py b/cloudinit/tests/test_stages.py index a06a2bde..a50836a4 100644 --- a/cloudinit/tests/test_stages.py +++ b/cloudinit/tests/test_stages.py @@ -428,22 +428,20 @@ class TestInit_InitializeFilesystem: """A fixture which yields a stages.Init instance with paths and cfg set As it is replaced with a mock, consumers of this fixture can set - `init.cfg` if the default empty dict configuration is not appropriate. + `init._cfg` if the default empty dict configuration is not appropriate. 
""" - with mock.patch( - "cloudinit.stages.Init.cfg", mock.PropertyMock(return_value={}) - ): - with mock.patch("cloudinit.stages.util.ensure_dirs"): - init = stages.Init() - init._paths = paths - yield init + with mock.patch("cloudinit.stages.util.ensure_dirs"): + init = stages.Init() + init._cfg = {} + init._paths = paths + yield init @mock.patch("cloudinit.stages.util.ensure_file") def test_ensure_file_not_called_if_no_log_file_configured( self, m_ensure_file, init ): """If no log file is configured, we should not ensure its existence.""" - init.cfg = {} + init._cfg = {} init._initialize_filesystem() @@ -452,11 +450,13 @@ class TestInit_InitializeFilesystem: def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir): """If a log file is configured, we should ensure its existence.""" log_file = tmpdir.join("cloud-init.log") - init.cfg = {"def_log_file": str(log_file)} + init._cfg = {"def_log_file": str(log_file)} init._initialize_filesystem() - assert log_file.exists + assert log_file.exists() + # Assert we create it 0o640 by default if it doesn't already exist + assert 0o640 == stat.S_IMODE(log_file.stat().mode) def test_existing_file_permissions_are_not_modified(self, init, tmpdir): """If the log file already exists, we should not modify its permissions @@ -469,7 +469,7 @@ class TestInit_InitializeFilesystem: log_file = tmpdir.join("cloud-init.log") log_file.ensure() log_file.chmod(mode) - init.cfg = {"def_log_file": str(log_file)} + init._cfg = {"def_log_file": str(log_file)} init._initialize_filesystem() -- cgit v1.2.3 From 05b0e35026db3789c56ee9f8192d4a81067325e5 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 10 Jun 2021 14:24:51 -0500 Subject: Use instance-data-sensitive.json in jinja templates (SC-117) (#917) instance-data.json redacts sensitive data for non-root users. Since user data is consumed as root, we should be consuming the non-redacted data instead. LP: #1931392 --- cloudinit/handlers/jinja_template.py | 5 ++-- doc/rtd/topics/instancedata.rst | 41 +++++++++++++++++++++++++++++--- tests/unittests/test_builtin_handlers.py | 30 +++++++++++++---------- 3 files changed, 58 insertions(+), 18 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/handlers/jinja_template.py b/cloudinit/handlers/jinja_template.py index aadfbf86..5033abbb 100644 --- a/cloudinit/handlers/jinja_template.py +++ b/cloudinit/handlers/jinja_template.py @@ -12,7 +12,7 @@ except ImportError: from cloudinit import handlers from cloudinit import log as logging -from cloudinit.sources import INSTANCE_JSON_FILE +from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE from cloudinit.templater import render_string, MISSING_JINJA_PREFIX from cloudinit.util import b64d, load_file, load_json, json_dumps @@ -36,7 +36,8 @@ class JinjaTemplatePartHandler(handlers.Handler): def handle_part(self, data, ctype, filename, payload, frequency, headers): if ctype in handlers.CONTENT_SIGNALS: return - jinja_json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE) + jinja_json_file = os.path.join( + self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) rendered_payload = render_jinja_payload_from_file( payload, filename, jinja_json_file) if not rendered_payload: diff --git a/doc/rtd/topics/instancedata.rst b/doc/rtd/topics/instancedata.rst index 1850982c..6c17139f 100644 --- a/doc/rtd/topics/instancedata.rst +++ b/doc/rtd/topics/instancedata.rst @@ -509,14 +509,19 @@ EC2 instance: Using instance-data =================== -As of cloud-init v. 
18.4, any variables present in -``/run/cloud-init/instance-data.json`` can be used in: +As of cloud-init v. 18.4, any instance-data can be used in: * User-data scripts * Cloud config data * Command line interface via **cloud-init query** or **cloud-init devel render** +This means that any variable present in +``/run/cloud-init/instance-data-sensitive.json`` can be used, +unless a non-root user is using the command line interface. +In the non-root user case, +``/run/cloud-init/instance-data.json`` will be used instead. + Many clouds allow users to provide user-data to an instance at the time the instance is launched. Cloud-init supports a number of :ref:`user_data_formats`. @@ -559,9 +564,39 @@ Below are some examples of providing these types of user-data: {%- endif %} ... +One way to easily explore what Jinja variables are available on your machine +is to use the ``cloud-init query --format`` (-f) commandline option which will +render any Jinja syntax you use. Warnings or exceptions will be raised on +invalid instance-data keys, paths or invalid syntax. + +.. code-block:: shell-session + + # List all instance-data keys and values as root user + % sudo cloud-init query --all + {...} + + # Introspect nested keys on an object + % cloud-init query -f "{{ds.keys()}}" + dict_keys(['meta_data', '_doc']) + + # Test your Jinja rendering syntax on the command-line directly + + # Failure to reference valid top-level instance-data key + % cloud-init query -f "{{invalid.instance-data.key}}" + WARNING: Ignoring jinja template for query commandline: 'invalid' is undefined + + # Failure to reference valid dot-delimited key path on a known top-level key + % cloud-init query -f "{{v1.not_here}}" + WARNING: Could not render jinja template variables in file 'query commandline': 'not_here' + CI_MISSING_JINJA_VAR/not_here + + # Test expected value using valid instance-data key path + % cloud-init query -f "My AMI: {{ds.meta_data.ami_id}}" + My AMI: ami-0fecc35d3c8ba8d60 + .. note:: Trying to reference jinja variables that don't exist in - instance-data.json will result in warnings in ``/var/log/cloud-init.log`` + instance-data will result in warnings in ``/var/log/cloud-init.log`` and the following string in your rendered user-data: ``CI_MISSING_JINJA_VAR/``. 
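As a brief sketch of what this changes in practice (the key path here is illustrative), a jinja user-data part such as:

    ## template: jinja
    #!/bin/sh
    echo "instance-id: {{ v1.instance_id }}" > /run/instance-id

is rendered by cloud-init as root, so its variables are now resolved from instance-data-sensitive.json rather than from the redacted instance-data.json.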
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index c5675249..30293e9e 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -27,6 +27,8 @@ from cloudinit.handlers.upstart_job import UpstartJobPartHandler from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE) +INSTANCE_DATA_FILE = 'instance-data-sensitive.json' + class TestUpstartJobPartHandler(FilesystemMockingTestCase): @@ -145,8 +147,8 @@ class TestJinjaTemplatePartHandler(CiTestCase): script_handler = ShellScriptPartHandler(self.paths) self.assertEqual(2, script_handler.handler_version) - # Create required instance-data.json file - instance_json = os.path.join(self.run_dir, 'instance-data.json') + # Create required instance data json file + instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) instance_data = {'topkey': 'echo himom'} util.write_file(instance_json, util.json_dumps(instance_data)) h = JinjaTemplatePartHandler( @@ -168,7 +170,7 @@ class TestJinjaTemplatePartHandler(CiTestCase): self.assertEqual(3, cloudcfg_handler.handler_version) # Create required instance-data.json file - instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) instance_data = {'topkey': {'sub': 'runcmd: [echo hi]'}} util.write_file(instance_json, util.json_dumps(instance_data)) h = JinjaTemplatePartHandler( @@ -198,8 +200,9 @@ class TestJinjaTemplatePartHandler(CiTestCase): script_file = os.path.join(script_handler.script_dir, 'part01') self.assertEqual( 'Cannot render jinja template vars. Instance data not yet present' - ' at {}/instance-data.json'.format( - self.run_dir), str(context_manager.exception)) + ' at {}/{}'.format(self.run_dir, INSTANCE_DATA_FILE), + str(context_manager.exception) + ) self.assertFalse( os.path.exists(script_file), 'Unexpected file created %s' % script_file) @@ -207,7 +210,8 @@ class TestJinjaTemplatePartHandler(CiTestCase): def test_jinja_template_handle_errors_on_unreadable_instance_data(self): """If instance-data is unreadable, raise an error from handle_part.""" script_handler = ShellScriptPartHandler(self.paths) - instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_json = os.path.join( + self.run_dir, INSTANCE_DATA_FILE) util.write_file(instance_json, util.json_dumps({})) h = JinjaTemplatePartHandler( self.paths, sub_handlers=[script_handler]) @@ -221,8 +225,8 @@ class TestJinjaTemplatePartHandler(CiTestCase): frequency='freq', headers='headers') script_file = os.path.join(script_handler.script_dir, 'part01') self.assertEqual( - 'Cannot render jinja template vars. No read permission on' - " '{rdir}/instance-data.json'. Try sudo".format(rdir=self.run_dir), + "Cannot render jinja template vars. No read permission on " + "'{}/{}'. 
Try sudo".format(self.run_dir, INSTANCE_DATA_FILE), str(context_manager.exception)) self.assertFalse( os.path.exists(script_file), @@ -230,9 +234,9 @@ class TestJinjaTemplatePartHandler(CiTestCase): @skipUnlessJinja() def test_jinja_template_handle_renders_jinja_content(self): - """When present, render jinja variables from instance-data.json.""" + """When present, render jinja variables from instance data""" script_handler = ShellScriptPartHandler(self.paths) - instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) instance_data = {'topkey': {'subkey': 'echo himom'}} util.write_file(instance_json, util.json_dumps(instance_data)) h = JinjaTemplatePartHandler( @@ -247,8 +251,8 @@ class TestJinjaTemplatePartHandler(CiTestCase): frequency='freq', headers='headers') script_file = os.path.join(script_handler.script_dir, 'part01') self.assertNotIn( - 'Instance data not yet present at {}/instance-data.json'.format( - self.run_dir), + 'Instance data not yet present at {}/{}'.format( + self.run_dir, INSTANCE_DATA_FILE), self.logs.getvalue()) self.assertEqual( '#!/bin/bash\necho himom', util.load_file(script_file)) @@ -257,7 +261,7 @@ class TestJinjaTemplatePartHandler(CiTestCase): def test_jinja_template_handle_renders_jinja_content_missing_keys(self): """When specified jinja variable is undefined, log a warning.""" script_handler = ShellScriptPartHandler(self.paths) - instance_json = os.path.join(self.run_dir, 'instance-data.json') + instance_json = os.path.join(self.run_dir, INSTANCE_DATA_FILE) instance_data = {'topkey': {'subkey': 'echo himom'}} util.write_file(instance_json, util.json_dumps(instance_data)) h = JinjaTemplatePartHandler( -- cgit v1.2.3 From 59a848c5929cbfca45d95860eb60dfebd0786c94 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Mon, 14 Jun 2021 15:39:05 -0400 Subject: add DragonFlyBSD support (#904) - Mostly based on FreeBSD, the main exception is that `find_devs_with_on_freebsd` does not work. - Since we cannot get the CDROM or the partition labels, `find_devs_with_on_dragonflybsd()` has a more naive approach and returns all the block devices. --- README.md | 2 +- cloudinit/config/cc_growpart.py | 4 ++ cloudinit/config/cc_resizefs.py | 5 +++ cloudinit/distros/dragonflybsd.py | 12 ++++++ cloudinit/distros/freebsd.py | 15 +++++++- cloudinit/net/__init__.py | 6 +-- cloudinit/net/freebsd.py | 11 +++++- cloudinit/util.py | 46 ++++++++++++++++++++++- config/cloud.cfg.tmpl | 22 +++++++++-- doc/rtd/topics/availability.rst | 5 ++- setup.py | 2 +- sysvinit/freebsd/cloudinit | 2 +- tests/unittests/test_distros/test_dragonflybsd.py | 25 ++++++++++++ tests/unittests/test_util.py | 18 +++++++++ 14 files changed, 157 insertions(+), 18 deletions(-) create mode 100644 cloudinit/distros/dragonflybsd.py create mode 100644 tests/unittests/test_distros/test_dragonflybsd.py (limited to 'cloudinit') diff --git a/README.md b/README.md index c382b592..02b2f666 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux<br />Debian<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> |
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />
| ## To start developing cloud-init diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 9f338ad1..9f5525a1 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -224,6 +224,10 @@ def device_part_info(devpath): freebsd_part = "/dev/" + util.find_freebsd_part(devpath) m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) return (m.group(1), m.group(2)) + elif util.is_DragonFlyBSD(): + dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath) + m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part) + return (m.group(1), m.group(2)) if not os.path.exists(syspath): raise ValueError("%s had no syspath (%s)" % (devpath, syspath)) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 9afbb847..990a6939 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -85,6 +85,10 @@ def _resize_zfs(mount_point, devpth): return ('zpool', 'online', '-e', mount_point, devpth) +def _resize_hammer2(mount_point, devpth): + return ('hammer2', 'growfs', mount_point) + + def _can_skip_resize_ufs(mount_point, devpth): # possible errors cases on the code-path to growfs -N following: # https://github.com/freebsd/freebsd/blob/HEAD/sbin/growfs/growfs.c @@ -113,6 +117,7 @@ RESIZE_FS_PREFIXES_CMDS = [ ('xfs', _resize_xfs), ('ufs', _resize_ufs), ('zfs', _resize_zfs), + ('hammer2', _resize_hammer2), ] RESIZE_FS_PRECHECK_CMDS = { diff --git a/cloudinit/distros/dragonflybsd.py b/cloudinit/distros/dragonflybsd.py new file mode 100644 index 00000000..2d825518 --- /dev/null +++ b/cloudinit/distros/dragonflybsd.py @@ -0,0 +1,12 @@ +# Copyright (C) 2020-2021 Gonéri Le Bouder +# +# This file is part of cloud-init. See LICENSE file for license information. + +import cloudinit.distros.freebsd + + +class Distro(cloudinit.distros.freebsd.Distro): + home_dir = '/home' + + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 9659843f..d94a52b8 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -18,6 +18,12 @@ LOG = logging.getLogger(__name__) class Distro(cloudinit.distros.bsd.BSD): + """ + Distro subclass for FreeBSD. + + (N.B. DragonFlyBSD inherits from this class.) 
+ """ + usr_lib_exec = '/usr/local/lib' login_conf_fn = '/etc/login.conf' login_conf_fn_bak = '/etc/login.conf.orig' @@ -28,6 +34,7 @@ class Distro(cloudinit.distros.bsd.BSD): pkg_cmd_update_prefix = ["pkg", "update"] pkg_cmd_upgrade_prefix = ["pkg", "upgrade"] prefer_fqdn = True # See rc.conf(5) in FreeBSD + home_dir = '/usr/home' def _get_add_member_to_group_cmd(self, member_name, group_name): return ['pw', 'usermod', '-n', member_name, '-G', group_name] @@ -66,9 +73,12 @@ class Distro(cloudinit.distros.bsd.BSD): pw_useradd_cmd.append('-d/nonexistent') log_pw_useradd_cmd.append('-d/nonexistent') else: - pw_useradd_cmd.append('-d/usr/home/%s' % name) + pw_useradd_cmd.append('-d{home_dir}/{name}'.format( + home_dir=self.home_dir, name=name)) pw_useradd_cmd.append('-m') - log_pw_useradd_cmd.append('-d/usr/home/%s' % name) + log_pw_useradd_cmd.append('-d{home_dir}/{name}'.format( + home_dir=self.home_dir, name=name)) + log_pw_useradd_cmd.append('-m') # Run the command @@ -155,4 +165,5 @@ class Distro(cloudinit.distros.bsd.BSD): "update-sources", self.package_command, ["update"], freq=PER_INSTANCE) + # vi: ts=4 expandtab diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 6b3b84f7..b827d41a 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -351,7 +351,7 @@ def device_devid(devname): def get_devicelist(): - if util.is_FreeBSD(): + if util.is_FreeBSD() or util.is_DragonFlyBSD(): return list(get_interfaces_by_mac().values()) try: @@ -376,7 +376,7 @@ def is_disabled_cfg(cfg): def find_fallback_nic(blacklist_drivers=None): """Return the name of the 'fallback' network device.""" - if util.is_FreeBSD(): + if util.is_FreeBSD() or util.is_DragonFlyBSD(): return find_fallback_nic_on_freebsd(blacklist_drivers) elif util.is_NetBSD() or util.is_OpenBSD(): return find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers) @@ -816,7 +816,7 @@ def get_ib_interface_hwaddr(ifname, ethernet_format): def get_interfaces_by_mac(blacklist_drivers=None) -> dict: - if util.is_FreeBSD(): + if util.is_FreeBSD() or util.is_DragonFlyBSD(): return get_interfaces_by_mac_on_freebsd( blacklist_drivers=blacklist_drivers) elif util.is_NetBSD(): diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py index c843d792..f8faf240 100644 --- a/cloudinit/net/freebsd.py +++ b/cloudinit/net/freebsd.py @@ -32,6 +32,13 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): LOG.debug("freebsd generate postcmd disabled") return + for dhcp_interface in self.dhcp_interfaces(): + # Observed on DragonFlyBSD 6. If we use the "restart" parameter, + # the routes are not recreated. 
+ subp.subp(['service', 'dhclient', 'stop', dhcp_interface], + rcs=[0, 1], + capture=True) + subp.subp(['service', 'netif', 'restart'], capture=True) # On FreeBSD 10, the restart of routing and dhclient is likely to fail # because @@ -42,7 +49,7 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1]) for dhcp_interface in self.dhcp_interfaces(): - subp.subp(['service', 'dhclient', 'restart', dhcp_interface], + subp.subp(['service', 'dhclient', 'start', dhcp_interface], rcs=[0, 1], capture=True) @@ -57,4 +64,4 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): def available(target=None): - return util.is_FreeBSD() + return util.is_FreeBSD() or util.is_DragonFlyBSD() diff --git a/cloudinit/util.py b/cloudinit/util.py index 2de1123e..f95dc435 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -392,7 +392,11 @@ def is_Linux(): @lru_cache() def is_BSD(): - return 'BSD' in platform.system() + if 'BSD' in platform.system(): + return True + if platform.system() == 'DragonFly': + return True + return False @lru_cache() @@ -400,6 +404,11 @@ def is_FreeBSD(): return system_info()['variant'] == "freebsd" +@lru_cache() +def is_DragonFlyBSD(): + return system_info()['variant'] == "dragonfly" + + @lru_cache() def is_NetBSD(): return system_info()['variant'] == "netbsd" @@ -534,7 +543,9 @@ def system_info(): var = 'suse' else: var = 'linux' - elif system in ('windows', 'darwin', "freebsd", "netbsd", "openbsd"): + elif system in ( + 'windows', 'darwin', "freebsd", "netbsd", + "openbsd", "dragonfly"): var = system info['variant'] = var @@ -1195,6 +1206,23 @@ def find_devs_with_openbsd(criteria=None, oformat='device', return ['/dev/' + i for i in devlist] +def find_devs_with_dragonflybsd(criteria=None, oformat='device', + tag=None, no_cache=False, path=None): + out, _err = subp.subp(['sysctl', '-n', 'kern.disks'], rcs=[0]) + devlist = [i for i in sorted(out.split(), reverse=True) + if not i.startswith("md") and not i.startswith("vn")] + + if criteria == "TYPE=iso9660": + devlist = [i for i in devlist + if i.startswith('cd') or i.startswith('acd')] + elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]: + devlist = [i for i in devlist + if not (i.startswith('cd') or i.startswith('acd'))] + elif criteria: + LOG.debug("Unexpected criteria: %s", criteria) + return ['/dev/' + i for i in devlist] + + def find_devs_with(criteria=None, oformat='device', tag=None, no_cache=False, path=None): """ @@ -1213,6 +1241,9 @@ def find_devs_with(criteria=None, oformat='device', elif is_OpenBSD(): return find_devs_with_openbsd(criteria, oformat, tag, no_cache, path) + elif is_DragonFlyBSD(): + return find_devs_with_dragonflybsd(criteria, oformat, + tag, no_cache, path) blk_id_cmd = ['blkid'] options = [] @@ -2211,6 +2242,14 @@ def find_freebsd_part(fs): LOG.warning("Unexpected input in find_freebsd_part: %s", fs) +def find_dragonflybsd_part(fs): + splitted = fs.split('/') + if len(splitted) == 3 and splitted[1] == 'dev': + return splitted[2] + else: + LOG.warning("Unexpected input in find_dragonflybsd_part: %s", fs) + + def get_path_dev_freebsd(path, mnt_list): path_found = None for line in mnt_list.split("\n"): @@ -2264,6 +2303,9 @@ def parse_mount(path): # https://regex101.com/r/T2en7a/1 regex = (r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))') + if is_DragonFlyBSD(): + regex = (r'^(/dev/[\S]+|\S*?) 
on (/[\S]*) ' + r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))') for line in mount_locs: m = re.search(regex, line) if not m: diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 2f6c3a7d..586384e4 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -1,8 +1,8 @@ ## template:jinja # The top level settings are used as module # and system configuration. - -{% if variant.endswith("bsd") %} +{% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %} +{% if is_bsd %} syslog_fix_perms: root:wheel {% elif variant in ["suse"] %} syslog_fix_perms: root:root @@ -61,11 +61,11 @@ cloud_init_modules: {% endif %} - bootcmd - write-files -{% if variant not in ["netbsd"] %} +{% if variant not in ["netbsd", "openbsd"] %} - growpart - resizefs {% endif %} -{% if variant not in ["freebsd", "netbsd"] %} +{% if not is_bsd %} - disk_setup - mounts {% endif %} @@ -158,6 +158,8 @@ system_info: "fedora", "freebsd", "netbsd", "openbsd", "rhel", "rocky", "suse", "ubuntu"] %} distro: {{ variant }} +{% elif variant in ["dragonfly"] %} + distro: dragonflybsd {% else %} # Unknown/fallback distro. distro: ubuntu @@ -249,6 +251,15 @@ system_info: groups: [wheel] sudo: ["ALL=(ALL) NOPASSWD:ALL"] shell: /bin/tcsh +{% elif variant in ["dragonfly"] %} + # Default user name + that default users groups (if added/used) + default_user: + name: dragonfly + lock_passwd: True + gecos: DragonFly + groups: [wheel] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/sh {% elif variant in ["netbsd"] %} default_user: name: netbsd @@ -269,4 +280,7 @@ system_info: {% if variant in ["freebsd", "netbsd", "openbsd"] %} network: renderers: ['{{ variant }}'] +{% elif variant in ["dragonfly"] %} + network: + renderers: ['freebsd'] {% endif %} diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index f3e13edc..e4480754 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -14,12 +14,13 @@ distributions and clouds, both public and private. Distributions ============= -Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD -and OpenBSD: +Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD, +OpenBSD and DragonFlyBSD: - Alpine Linux - ArchLinux - Debian +- DragonFlyBSD - Fedora - FreeBSD - Gentoo Linux diff --git a/setup.py b/setup.py index cbacf48e..dcbe0843 100755 --- a/setup.py +++ b/setup.py @@ -156,7 +156,7 @@ USR = "usr" ETC = "etc" USR_LIB_EXEC = "usr/lib" LIB = "lib" -if os.uname()[0] == 'FreeBSD': +if os.uname()[0] in ['FreeBSD', 'DragonFly']: USR = "usr/local" USR_LIB_EXEC = "usr/local/lib" elif os.path.isfile('/etc/redhat-release'): diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit index aa5bd118..d26f3d0f 100755 --- a/sysvinit/freebsd/cloudinit +++ b/sysvinit/freebsd/cloudinit @@ -2,7 +2,7 @@ # PROVIDE: cloudinit # REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal ldconfig devd -# BEFORE: cloudconfig cloudfinal +# BEFORE: LOGIN cloudconfig cloudfinal . 
/etc/rc.subr diff --git a/tests/unittests/test_distros/test_dragonflybsd.py b/tests/unittests/test_distros/test_dragonflybsd.py new file mode 100644 index 00000000..df2c00f4 --- /dev/null +++ b/tests/unittests/test_distros/test_dragonflybsd.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 + + +import cloudinit.util +from cloudinit.tests.helpers import mock + + +def test_find_dragonflybsd_part(): + assert cloudinit.util.find_dragonflybsd_part("/dev/vbd0s3") == "vbd0s3" + + +@mock.patch("cloudinit.util.is_DragonFlyBSD") +@mock.patch("cloudinit.subp.subp") +def test_parse_mount(mock_subp, m_is_DragonFlyBSD): + mount_out = """ +vbd0s3 on / (hammer2, local) +devfs on /dev (devfs, nosymfollow, local) +/dev/vbd0s0a on /boot (ufs, local) +procfs on /proc (procfs, local) +tmpfs on /var/run/shm (tmpfs, local) +""" + + mock_subp.return_value = (mount_out, "") + m_is_DragonFlyBSD.return_value = True + assert cloudinit.util.parse_mount("/") == ("vbd0s3", "hammer2", "/") diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index e5292001..2290cab7 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -999,4 +999,22 @@ class TestFindDevs: devlist = util.find_devs_with_netbsd(criteria=criteria) assert devlist == expected_devlist + @pytest.mark.parametrize( + 'criteria,expected_devlist', ( + (None, ['/dev/vbd0', '/dev/cd0', '/dev/acd0']), + ('TYPE=iso9660', ['/dev/cd0', '/dev/acd0']), + ('TYPE=vfat', ['/dev/vbd0']), + ('LABEL_FATBOOT=A_LABEL', # lp: #1841466 + ['/dev/vbd0', '/dev/cd0', '/dev/acd0']), + ) + ) + @mock.patch("cloudinit.subp.subp") + def test_find_devs_with_dragonflybsd(self, m_subp, criteria, + expected_devlist): + m_subp.return_value = ( + 'md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', '' + ) + devlist = util.find_devs_with_dragonflybsd(criteria=criteria) + assert devlist == expected_devlist + # vi: ts=4 expandtab -- cgit v1.2.3 From 950c186a7e0c66a3ed84ea97291e5829ca3d826c Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 15 Jun 2021 15:25:37 -0500 Subject: Replace deprecated collections.Iterable with abc replacement (#922) LP: #1932048 --- cloudinit/log.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/log.py b/cloudinit/log.py index 2e5df042..10149907 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -8,7 +8,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-import collections +import collections.abc import io import logging import logging.config @@ -78,7 +78,7 @@ def setupLogging(cfg=None): for a_cfg in cfg['log_cfgs']: if isinstance(a_cfg, str): log_cfgs.append(a_cfg) - elif isinstance(a_cfg, (collections.Iterable)): + elif isinstance(a_cfg, (collections.abc.Iterable)): cfg_str = [str(c) for c in a_cfg] log_cfgs.append('\n'.join(cfg_str)) else: -- cgit v1.2.3 From c8d3f99be84a4a04074a94c451387932bd086b26 Mon Sep 17 00:00:00 2001 From: Mike Russell Date: Wed, 16 Jun 2021 05:51:00 -0700 Subject: Small Doc Update for ReportEventStack and Test (#920) - small document update for ReportEventStack explaining post_files parameter - small unit test for test_reporting demonstrating the close of an event with optional post_files list --- cloudinit/reporting/events.py | 8 ++++++++ tests/unittests/test_reporting.py | 18 ++++++++++++++++++ tools/.github-cla-signers | 1 + 3 files changed, 27 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py index b8677c8b..9afad747 100644 --- a/cloudinit/reporting/events.py +++ b/cloudinit/reporting/events.py @@ -165,7 +165,15 @@ class ReportEventStack(object): :param result_on_exception: The result value to set if an exception is caught. default value is FAIL. + + :param post_files: + Can hold filepaths of files that are to get posted/created + regarding a given event. Something like success or failure information + in a given log file. For each filepath, if it's a valid regular file + it will get: read & encoded as base64 at the close of the event. + Default value, if None, is an empty list. """ + def __init__(self, name, description, message=None, parent=None, reporting_enabled=None, result_on_exception=status.FAIL, post_files=None): diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index 9f11fd5c..b78a6939 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -113,6 +113,7 @@ class TestReportingEvent(TestCase): class TestFinishReportingEvent(TestCase): + def test_as_has_result(self): result = events.status.SUCCESS name, desc = 'test_name', 'test_desc' @@ -121,6 +122,23 @@ class TestFinishReportingEvent(TestCase): self.assertTrue('result' in ret) self.assertEqual(ret['result'], result) + def test_has_result_with_optional_post_files(self): + result = events.status.SUCCESS + name, desc, files = 'test_name', 'test_desc', [ + '/really/fake/path/install.log'] + event = events.FinishReportingEvent( + name, desc, result, post_files=files) + ret = event.as_dict() + self.assertTrue('result' in ret) + self.assertTrue('files' in ret) + self.assertEqual(ret['result'], result) + posted_install_log = ret['files'][0] + self.assertTrue('path' in posted_install_log) + self.assertTrue('content' in posted_install_log) + self.assertTrue('encoding' in posted_install_log) + self.assertEqual(posted_install_log['path'], files[0]) + self.assertEqual(posted_install_log['encoding'], 'base64') + class TestBaseReportingHandler(TestCase): diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 14916d31..0b9fbe9e 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -22,6 +22,7 @@ emmanuelthome esposem giggsoff hamalq +irishgordo izzyleung johnsonshi jordimassaguerpla -- cgit v1.2.3 From 1243c5a1fc1f3546b40e48a2033a9abab546e40f Mon Sep 17 00:00:00 2001 From: Mark Mercado Date: Thu, 17 Jun 2021 10:37:23 -0400 Subject: Fix the spelling of "DigitalOcean" (#924) The name "DigitalOcean" 
doesn't have a space in it; it's a single compound word written in Pascal case (upper camel case). --- README.md | 2 +- cloudinit/sources/DataSourceDigitalOcean.py | 2 +- doc/rtd/topics/availability.rst | 2 +- doc/rtd/topics/datasources/digitalocean.rst | 4 ++-- tools/.github-cla-signers | 1 + 5 files changed, 6 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/README.md b/README.md index 02b2f666..73d9e780 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />Digital Ocean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> |
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /> | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br /> | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />
| ## To start developing cloud-init diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5040ce5b..08805d99 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -54,7 +54,7 @@ class DataSourceDigitalOcean(sources.DataSource): if not is_do: return False - LOG.info("Running on digital ocean. droplet_id=%s", droplet_id) + LOG.info("Running on DigitalOcean. droplet_id=%s", droplet_id) ipv4LL_nic = None if self.use_ip4LL: diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index e4480754..a45a49d6 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -43,7 +43,7 @@ environments in the public cloud: - Softlayer - Rackspace Public Cloud - IBM Cloud -- Digital Ocean +- DigitalOcean - Bigstep - Hetzner - Joyent diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst index 88f1e5f5..a4910408 100644 --- a/doc/rtd/topics/datasources/digitalocean.rst +++ b/doc/rtd/topics/datasources/digitalocean.rst @@ -1,7 +1,7 @@ .. _datasource_digital_ocean: -Digital Ocean -============= +DigitalOcean +============ The `DigitalOcean`_ datasource consumes the content served from DigitalOcean's `metadata service`_. This metadata service serves information about the diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 0b9fbe9e..a7c36a8c 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -32,6 +32,7 @@ klausenbusk landon912 lucasmoura lungj +mamercad manuelisimo marlluslustosa matthewruffell -- cgit v1.2.3 From abd2da5777195e7e432b0d53a3f7f29d071dd50e Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 17 Jun 2021 16:44:55 -0500 Subject: Fix DNS in NetworkState (SC-133) (#923) v1 network config currently has no concept of interface-specific DNS, which is required for certain renderers. To fix this, added an optional 'interface' key on the v1 nameserver definition. If specified, it makes the DNS settings specific to the interface. Otherwise, it will be defined as global DNS as it always has. Additionally, DNS for v2 wasn't being recognized correctly. For DNS defined on a particular interface, these settings now also go into the global DNS settings as they were intended. 
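A compact sketch of the new v1 form (the addresses and interface name mirror the documentation example added below; the search domain is a placeholder):

    network:
      version: 1
      config:
        - type: physical
          name: interface0
          mac_address: '00:11:22:33:44:55'
          subnets:
            - type: static
              address: 192.168.23.14/27
              gateway: 192.168.23.1
        - type: nameserver
          interface: interface0  # applies to interface0 only
          address: [192.168.23.2, 8.8.8.8]
          search: [example.com]

A nameserver entry without the optional "interface" key keeps the old behavior and contributes to the global DNS settings.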
--- cloudinit/net/network_state.py | 66 ++++++++++++++---- cloudinit/net/tests/test_network_state.py | 103 ++++++++++++++++++++++++++++ doc/rtd/topics/network-config-format-v1.rst | 5 ++ 3 files changed, 159 insertions(+), 15 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index e8bf9e39..8018cfb9 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -237,6 +237,7 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): self._network_state = copy.deepcopy(self.initial_network_state) self._network_state['config'] = config self._parsed = False + self._interface_dns_map = {} @property def network_state(self): @@ -310,6 +311,21 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): LOG.warning("Skipping invalid command: %s", command, exc_info=True) LOG.debug(self.dump_network_state()) + for interface, dns in self._interface_dns_map.items(): + iface = None + try: + iface = self._network_state['interfaces'][interface] + except KeyError as e: + raise ValueError( + 'Nameserver specified for interface {0}, ' + 'but interface {0} does not exist!'.format(interface) + ) from e + if iface: + nameservers, search = dns + iface['dns'] = { + 'addresses': nameservers, + 'search': search, + } def parse_config_v2(self, skip_broken=True): for command_type, command in self._config.items(): @@ -526,21 +542,40 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): def handle_infiniband(self, command): self.handle_physical(command) - @ensure_command_keys(['address']) - def handle_nameserver(self, command): - dns = self._network_state.get('dns') + def _parse_dns(self, command): + nameservers = [] + search = [] if 'address' in command: addrs = command['address'] if not type(addrs) == list: addrs = [addrs] for addr in addrs: - dns['nameservers'].append(addr) + nameservers.append(addr) if 'search' in command: paths = command['search'] if not isinstance(paths, list): paths = [paths] for path in paths: - dns['search'].append(path) + search.append(path) + return nameservers, search + + @ensure_command_keys(['address']) + def handle_nameserver(self, command): + dns = self._network_state.get('dns') + nameservers, search = self._parse_dns(command) + if 'interface' in command: + self._interface_dns_map[command['interface']] = ( + nameservers, search + ) + else: + dns['nameservers'].extend(nameservers) + dns['search'].extend(search) + + @ensure_command_keys(['address']) + def _handle_individual_nameserver(self, command, iface): + _iface = self._network_state.get('interfaces') + nameservers, search = self._parse_dns(command) + _iface[iface]['dns'] = {'nameservers': nameservers, 'search': search} @ensure_command_keys(['destination']) def handle_route(self, command): @@ -706,16 +741,17 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): def _v2_common(self, cfg): LOG.debug('v2_common: handling config:\n%s', cfg) - if 'nameservers' in cfg: - search = cfg.get('nameservers').get('search', []) - dns = cfg.get('nameservers').get('addresses', []) - name_cmd = {'type': 'nameserver'} - if len(search) > 0: - name_cmd.update({'search': search}) - if len(dns) > 0: - name_cmd.update({'addresses': dns}) - LOG.debug('v2(nameserver) -> v1(nameserver):\n%s', name_cmd) - self.handle_nameserver(name_cmd) + for iface, dev_cfg in cfg.items(): + if 'nameservers' in dev_cfg: + search = dev_cfg.get('nameservers').get('search', []) + dns = dev_cfg.get('nameservers').get('addresses', []) + name_cmd = {'type': 
'nameserver'} + if len(search) > 0: + name_cmd.update({'search': search}) + if len(dns) > 0: + name_cmd.update({'address': dns}) + self.handle_nameserver(name_cmd) + self._handle_individual_nameserver(name_cmd, iface) def _handle_bond_bridge(self, command, cmd_type=None): """Common handler for bond and bridge types""" diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py index 07d726e2..fc4724a1 100644 --- a/cloudinit/net/tests/test_network_state.py +++ b/cloudinit/net/tests/test_network_state.py @@ -2,12 +2,62 @@ from unittest import mock +import pytest + +from cloudinit import safeyaml from cloudinit.net import network_state from cloudinit.tests.helpers import CiTestCase netstate_path = 'cloudinit.net.network_state' +_V1_CONFIG_NAMESERVERS = """\ +network: + version: 1 + config: + - type: nameserver + interface: {iface} + address: + - 192.168.1.1 + - 8.8.8.8 + search: + - spam.local + - type: nameserver + address: + - 192.168.1.0 + - 4.4.4.4 + search: + - eggs.local + - type: physical + name: eth0 + mac_address: '00:11:22:33:44:55' + - type: physical + name: eth1 + mac_address: '66:77:88:99:00:11' +""" + +V1_CONFIG_NAMESERVERS_VALID = _V1_CONFIG_NAMESERVERS.format(iface='eth1') +V1_CONFIG_NAMESERVERS_INVALID = _V1_CONFIG_NAMESERVERS.format(iface='eth90') + +V2_CONFIG_NAMESERVERS = """\ +network: + version: 2 + ethernets: + eth0: + match: + macaddress: '00:11:22:33:44:55' + nameservers: + search: [spam.local, eggs.local] + addresses: [8.8.8.8] + eth1: + match: + macaddress: '66:77:88:99:00:11' + nameservers: + search: [foo.local, bar.local] + addresses: [4.4.4.4] +""" + + class TestNetworkStateParseConfig(CiTestCase): def setUp(self): @@ -55,4 +105,57 @@ class TestNetworkStateParseConfigV2(CiTestCase): self.assertEqual(ncfg, nsi.as_dict()['config']) +class TestNetworkStateParseNameservers: + def _parse_network_state_from_config(self, config): + yaml = safeyaml.load(config) + return network_state.parse_net_config_data(yaml['network']) + + def test_v1_nameservers_valid(self): + config = self._parse_network_state_from_config( + V1_CONFIG_NAMESERVERS_VALID) + + # If an interface was specified, DNS shouldn't be in the global list + assert ['192.168.1.0', '4.4.4.4'] == sorted( + config.dns_nameservers) + assert ['eggs.local'] == config.dns_searchdomains + + # If an interface was specified, DNS should be part of the interface + for iface in config.iter_interfaces(): + if iface['name'] == 'eth1': + assert iface['dns']['addresses'] == ['192.168.1.1', '8.8.8.8'] + assert iface['dns']['search'] == ['spam.local'] + else: + assert 'dns' not in iface + + def test_v1_nameservers_invalid(self): + with pytest.raises(ValueError): + self._parse_network_state_from_config( + V1_CONFIG_NAMESERVERS_INVALID) + + def test_v2_nameservers(self): + config = self._parse_network_state_from_config(V2_CONFIG_NAMESERVERS) + + # Ensure DNS defined on interface exists on interface + for iface in config.iter_interfaces(): + if iface['name'] == 'eth0': + assert iface['dns'] == { + 'nameservers': ['8.8.8.8'], + 'search': ['spam.local', 'eggs.local'], + } + else: + assert iface['dns'] == { + 'nameservers': ['4.4.4.4'], + 'search': ['foo.local', 'bar.local'] + } + + # Ensure DNS defined on interface also exists globally (since there + # is no global DNS definitions in v2) + assert ['4.4.4.4', '8.8.8.8'] == sorted(config.dns_nameservers) + assert [ + 'bar.local', + 'eggs.local', + 'foo.local', + 'spam.local', + ] == sorted(config.dns_searchdomains) + # vi: ts=4 expandtab diff --git 
a/doc/rtd/topics/network-config-format-v1.rst b/doc/rtd/topics/network-config-format-v1.rst index 17732c2a..3202163b 100644 --- a/doc/rtd/topics/network-config-format-v1.rst +++ b/doc/rtd/topics/network-config-format-v1.rst @@ -335,6 +335,10 @@ the following keys: - ``address``: List of IPv4 or IPv6 address of nameservers. - ``search``: List of of hostnames to include in the resolv.conf search path. +- ``interface``: Optional. Ties the nameserver definition to the specified + interface. The value specified here must match the `name` of an interface + defined in this config. If unspecified, this nameserver will be considered + a global nameserver. **Nameserver Example**:: @@ -349,6 +353,7 @@ the following keys: address: 192.168.23.14/27 gateway: 192.168.23.1 - type: nameserver + interface: interface0 # Ties nameserver to interface0 only address: - 192.168.23.2 - 8.8.8.8 -- cgit v1.2.3 From 35aa9db6f8e2ba05d366776c0e8d97f52217e930 Mon Sep 17 00:00:00 2001 From: sshedi <53473811+sshedi@users.noreply.github.com> Date: Fri, 18 Jun 2021 22:23:44 +0530 Subject: Add support for VMware PhotonOS (#909) Also added a new (currently experimental) systemd-networkd renderer, and includes a small refactor to cc_resolv_conf.py to support the resolved.conf used by systemd-resolved. --- README.md | 2 +- cloudinit/cmd/devel/net_convert.py | 11 +- cloudinit/config/cc_ntp.py | 26 +- cloudinit/config/cc_resolv_conf.py | 25 +- cloudinit/config/cc_yum_add_repo.py | 4 +- cloudinit/config/tests/test_resolv_conf.py | 28 +- cloudinit/distros/__init__.py | 4 +- cloudinit/distros/arch.py | 1 - cloudinit/distros/gentoo.py | 1 - cloudinit/distros/opensuse.py | 1 - cloudinit/distros/photon.py | 355 +++++++++++++++++++++ cloudinit/distros/rhel.py | 1 - cloudinit/net/networkd.py | 246 ++++++++++++++ cloudinit/net/renderers.py | 4 +- cloudinit/tests/test_util.py | 20 ++ cloudinit/util.py | 4 +- config/cloud.cfg.tmpl | 52 ++- systemd/cloud-init.service.tmpl | 2 + templates/chrony.conf.photon.tmpl | 48 +++ templates/hosts.photon.tmpl | 22 ++ templates/ntp.conf.photon.tmpl | 61 ++++ templates/resolv.conf.tmpl | 2 +- templates/systemd.resolved.conf.tmpl | 15 + tests/cloud_tests/util.py | 2 +- tests/unittests/test_cli.py | 2 +- tests/unittests/test_distros/test_netconfig.py | 99 +++++- .../test_handler/test_handler_set_hostname.py | 26 ++ tests/unittests/test_net.py | 244 +++++++++++++- tests/unittests/test_render_cloudcfg.py | 3 +- tools/render-cloudcfg | 4 +- 30 files changed, 1256 insertions(+), 59 deletions(-) create mode 100644 cloudinit/distros/photon.py create mode 100644 cloudinit/net/networkd.py create mode 100644 templates/chrony.conf.photon.tmpl create mode 100644 templates/hosts.photon.tmpl create mode 100644 templates/ntp.conf.photon.tmpl create mode 100644 templates/systemd.resolved.conf.tmpl (limited to 'cloudinit') diff --git a/README.md b/README.md index 6f7e4c99..462e3204 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
<br/>ArchLinux<br/>Debian<br/>DragonFlyBSD<br/>Fedora<br/>FreeBSD<br/>Gentoo Linux<br/>NetBSD<br/>OpenBSD<br/>RHEL/CentOS/AlmaLinux/Rocky<br/>SLES/openSUSE<br/>Ubuntu<br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/>| Amazon Web Services<br/>Microsoft Azure<br/>Google Cloud Platform<br/>Oracle Cloud Infrastructure<br/>Softlayer<br/>Rackspace Public Cloud<br/>IBM Cloud<br/>DigitalOcean<br/>Bigstep<br/>Hetzner<br/>Joyent<br/>CloudSigma<br/>Alibaba Cloud<br/>OVH<br/>OpenNebula<br/>Exoscale<br/>Scaleway<br/>CloudStack<br/>AltCloud<br/>SmartOS<br/>HyperOne<br/>Vultr<br/>Rootbox<br/>| Bare metal installs<br/>OpenStack<br/>LXD<br/>KVM<br/>Metal-as-a-Service (MAAS)<br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/>|
+| Alpine Linux<br/>ArchLinux<br/>Debian<br/>DragonFlyBSD<br/>Fedora<br/>FreeBSD<br/>Gentoo Linux<br/>NetBSD<br/>OpenBSD<br/>RHEL/CentOS/AlmaLinux/Rocky/PhotonOS<br/>SLES/openSUSE<br/>Ubuntu<br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/>| Amazon Web Services<br/>Microsoft Azure<br/>Google Cloud Platform<br/>Oracle Cloud Infrastructure<br/>Softlayer<br/>Rackspace Public Cloud<br/>IBM Cloud<br/>DigitalOcean<br/>Bigstep<br/>Hetzner<br/>Joyent<br/>CloudSigma<br/>Alibaba Cloud<br/>OVH<br/>OpenNebula<br/>Exoscale<br/>Scaleway<br/>CloudStack<br/>AltCloud<br/>SmartOS<br/>HyperOne<br/>Vultr<br/>Rootbox<br/>| Bare metal installs<br/>OpenStack<br/>LXD<br/>KVM<br/>Metal-as-a-Service (MAAS)<br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/><br/>
| ## To start developing cloud-init diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 0668ffa3..5c649fd0 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -11,7 +11,7 @@ from cloudinit.sources import DataSourceAzure as azure from cloudinit.sources import DataSourceOVF as ovf from cloudinit import distros, safeyaml -from cloudinit.net import eni, netplan, network_state, sysconfig +from cloudinit.net import eni, netplan, networkd, network_state, sysconfig from cloudinit import log NAME = 'net-convert' @@ -51,7 +51,7 @@ def get_parser(parser=None): parser.add_argument("--debug", action='store_true', help='enable debug logging to stderr.') parser.add_argument("-O", "--output-kind", - choices=['eni', 'netplan', 'sysconfig'], + choices=['eni', 'netplan', 'networkd', 'sysconfig'], required=True, help="The network config format to emit") return parser @@ -118,9 +118,14 @@ def handle_args(name, args): config['netplan_path'] = config['netplan_path'][1:] # enable some netplan features config['features'] = ['dhcp-use-domains', 'ipv6-mtu'] - else: + elif args.output_kind == "networkd": + r_cls = networkd.Renderer + config = distro.renderer_configs.get('networkd') + elif args.output_kind == "sysconfig": r_cls = sysconfig.Renderer config = distro.renderer_configs.get('sysconfig') + else: + raise RuntimeError("Invalid output_kind") r = r_cls(config=config) sys.stderr.write(''.join([ diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 70c24610..acf3251d 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -25,7 +25,7 @@ frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse', - 'rhel', 'rocky', 'sles', 'ubuntu'] + 'photon', 'rhel', 'rocky', 'sles', 'ubuntu'] NTP_CLIENT_CONFIG = { 'chrony': { @@ -80,24 +80,37 @@ DISTRO_CLIENT_CONFIG = { 'confpath': '/etc/chrony/chrony.conf', }, }, - 'rhel': { + 'opensuse': { + 'chrony': { + 'service_name': 'chronyd', + }, 'ntp': { + 'confpath': '/etc/ntp.conf', 'service_name': 'ntpd', }, - 'chrony': { - 'service_name': 'chronyd', + 'systemd-timesyncd': { + 'check_exe': '/usr/lib/systemd/systemd-timesyncd', }, }, - 'opensuse': { + 'photon': { 'chrony': { 'service_name': 'chronyd', }, 'ntp': { - 'confpath': '/etc/ntp.conf', 'service_name': 'ntpd', + 'confpath': '/etc/ntp.conf' }, 'systemd-timesyncd': { 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + 'confpath': '/etc/systemd/timesyncd.conf', + }, + }, + 'rhel': { + 'ntp': { + 'service_name': 'ntpd', + }, + 'chrony': { + 'service_name': 'chronyd', }, }, 'sles': { @@ -551,7 +564,6 @@ def handle(name, cfg, cloud, log, _args): # Select which client is going to be used and get the configuration ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), cloud.distro) - # Allow user ntp config to override distro configurations ntp_client_config = util.mergemanydict( [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 466dad03..c51967e2 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -30,7 +30,7 @@ are configured correctly. **Module frequency:** per instance -**Supported distros:** alpine, fedora, rhel, sles +**Supported distros:** alpine, fedora, photon, rhel, sles **Config keys**:: @@ -47,18 +47,23 @@ are configured correctly. 
""" from cloudinit import log as logging -from cloudinit.settings import PER_INSTANCE from cloudinit import templater +from cloudinit.settings import PER_INSTANCE from cloudinit import util LOG = logging.getLogger(__name__) frequency = PER_INSTANCE -distros = ['alpine', 'fedora', 'opensuse', 'rhel', 'sles'] +distros = ['alpine', 'fedora', 'opensuse', 'photon', 'rhel', 'sles'] + +RESOLVE_CONFIG_TEMPLATE_MAP = { + '/etc/resolv.conf': 'resolv.conf', + '/etc/systemd/resolved.conf': 'systemd.resolved.conf', +} -def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"): +def generate_resolv_conf(template_fn, params, target_fname): flags = [] false_flags = [] @@ -104,12 +109,18 @@ def handle(name, cfg, cloud, log, _args): if "resolv_conf" not in cfg: log.warning("manage_resolv_conf True but no parameters provided!") - template_fn = cloud.get_template_filename('resolv.conf') - if not template_fn: + try: + template_fn = cloud.get_template_filename( + RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolv_conf_fn]) + except KeyError: log.warning("No template found, not rendering /etc/resolv.conf") return - generate_resolv_conf(template_fn=template_fn, params=cfg["resolv_conf"]) + generate_resolv_conf( + template_fn=template_fn, + params=cfg["resolv_conf"], + target_fname=cloud.disro.resolve_conf_fn + ) return # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 7daa6bd9..67f09686 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -18,7 +18,7 @@ entry, the config entry will be skipped. **Module frequency:** per always -**Supported distros:** almalinux, centos, fedora, rhel, rocky +**Supported distros:** almalinux, centos, fedora, photon, rhel, rocky **Config keys**:: @@ -36,7 +36,7 @@ from configparser import ConfigParser from cloudinit import util -distros = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky'] +distros = ['almalinux', 'centos', 'fedora', 'photon', 'rhel', 'rocky'] def _canonicalize_id(repo_id): diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py index 6546a0b5..45a06c22 100644 --- a/cloudinit/config/tests/test_resolv_conf.py +++ b/cloudinit/config/tests/test_resolv_conf.py @@ -1,9 +1,8 @@ -from unittest import mock - import pytest +from unittest import mock from cloudinit.config.cc_resolv_conf import generate_resolv_conf - +from tests.unittests.test_distros.test_create_users import MyBaseDistro EXPECTED_HEADER = """\ # Your system has been configured with 'manage-resolv-conf' set to true. 
@@ -14,22 +13,28 @@ EXPECTED_HEADER = """\ class TestGenerateResolvConf: + + dist = MyBaseDistro() + tmpl_fn = "templates/resolv.conf.tmpl" + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_default_target_fname_is_etc_resolvconf(self, m_render_to_file): - generate_resolv_conf("templates/resolv.conf.tmpl", mock.MagicMock()) + def test_dist_resolv_conf_fn(self, m_render_to_file): + self.dist.resolve_conf_fn = "/tmp/resolv-test.conf" + generate_resolv_conf(self.tmpl_fn, + mock.MagicMock(), + self.dist.resolve_conf_fn) assert [ - mock.call(mock.ANY, "/etc/resolv.conf", mock.ANY) + mock.call(mock.ANY, self.dist.resolve_conf_fn, mock.ANY) ] == m_render_to_file.call_args_list @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") def test_target_fname_is_used_if_passed(self, m_render_to_file): - generate_resolv_conf( - "templates/resolv.conf.tmpl", mock.MagicMock(), "/use/this/path" - ) + path = "/use/this/path" + generate_resolv_conf(self.tmpl_fn, mock.MagicMock(), path) assert [ - mock.call(mock.ANY, "/use/this/path", mock.ANY) + mock.call(mock.ANY, path, mock.ANY) ] == m_render_to_file.call_args_list # Patch in templater so we can assert on the actual generated content @@ -75,7 +80,8 @@ class TestGenerateResolvConf: def test_flags_and_options( self, m_write_file, params, expected_extra_line ): - generate_resolv_conf("templates/resolv.conf.tmpl", params) + target_fn = "/etc/resolv.conf" + generate_resolv_conf(self.tmpl_fn, params, target_fn) expected_content = EXPECTED_HEADER if expected_extra_line is not None: diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 57e33621..4991f42b 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -46,7 +46,8 @@ OSFAMILIES = { 'debian': ['debian', 'ubuntu'], 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], - 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'rhel', 'rocky'], + 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'photon', 'rhel', + 'rocky'], 'suse': ['opensuse', 'sles'], } @@ -80,6 +81,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): _ci_pkl_version = 1 prefer_fqdn = False + resolve_conf_fn = "/etc/resolv.conf" def __init__(self, name, cfg, paths): self._paths = paths diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index f8385f7f..246e6fe7 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -25,7 +25,6 @@ LOG = logging.getLogger(__name__) class Distro(distros.Distro): locale_gen_fn = "/etc/locale.gen" network_conf_dir = "/etc/netctl" - resolve_conf_fn = "/etc/resolv.conf" init_cmd = ['systemctl'] # init scripts renderer_configs = { "netplan": {"netplan_path": "/etc/netplan/50-cloud-init.yaml", diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py index e9b82602..68c03e7f 100644 --- a/cloudinit/distros/gentoo.py +++ b/cloudinit/distros/gentoo.py @@ -23,7 +23,6 @@ LOG = logging.getLogger(__name__) class Distro(distros.Distro): locale_conf_fn = '/etc/locale.gen' network_conf_fn = '/etc/conf.d/net' - resolve_conf_fn = '/etc/resolv.conf' hostname_conf_fn = '/etc/conf.d/hostname' init_cmd = ['rc-service'] # init scripts diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 7ca0ef99..270cc189 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -27,7 +27,6 @@ class Distro(distros.Distro): locale_conf_fn = '/etc/sysconfig/language' network_conf_fn = '/etc/sysconfig/network/config' network_script_tpl = 
'/etc/sysconfig/network/ifcfg-%s' - resolve_conf_fn = '/etc/resolv.conf' route_conf_tpl = '/etc/sysconfig/network/ifroute-%s' systemd_hostname_conf_fn = '/etc/hostname' systemd_locale_conf_fn = '/etc/locale.conf' diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py new file mode 100644 index 00000000..8b78f98f --- /dev/null +++ b/cloudinit/distros/photon.py @@ -0,0 +1,355 @@ +#!/usr/bin/env python3 +# vi: ts=4 expandtab +# +# Copyright (C) 2021 VMware Inc. +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import util +from cloudinit import subp +from cloudinit import distros +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit.distros import net_util +from cloudinit.settings import PER_INSTANCE +from cloudinit.distros import rhel_util as rhutil +from cloudinit.net.network_state import mask_to_net_prefix +from cloudinit.distros.parsers.hostname import HostnameConf + +LOG = logging.getLogger(__name__) + + +class Distro(distros.Distro): + hostname_conf_fn = '/etc/hostname' + network_conf_dir = '/etc/systemd/network/' + systemd_locale_conf_fn = '/etc/locale.conf' + resolve_conf_fn = '/etc/systemd/resolved.conf' + + renderer_configs = { + 'networkd': { + 'resolv_conf_fn': resolve_conf_fn, + 'network_conf_dir': network_conf_dir, + } + } + + # Should be fqdn if we can use it + prefer_fqdn = True + + def __init__(self, name, cfg, paths): + distros.Distro.__init__(self, name, cfg, paths) + # This will be used to restrict certain + # calls from repeatedly happening (when they + # should only happen say once per instance...) + self._runner = helpers.Runners(paths) + self.osfamily = 'photon' + self.init_cmd = ['systemctl'] + + def exec_cmd(self, cmd, capture=False): + LOG.debug('Attempting to run: %s', cmd) + try: + (out, err) = subp.subp(cmd, capture=capture) + if err: + LOG.warning('Running %s resulted in stderr output: %s', + cmd, err) + return True, out, err + except subp.ProcessExecutionError: + util.logexc(LOG, 'Command %s failed', cmd) + return False, None, None + + def apply_locale(self, locale, out_fn=None): + # This has a dependency on glibc-i18n; the user needs to manually + # install it and enable the option in cloud.cfg + if not out_fn: + out_fn = self.systemd_locale_conf_fn + + locale_cfg = { + 'LANG': locale, + } + + rhutil.update_sysconfig_file(out_fn, locale_cfg) + + # rhutil will modify /etc/locale.conf + # For locale change to take effect, reboot is needed or we can restart + # systemd-localed. This is the equivalent of localectl + cmd = ['systemctl', 'restart', 'systemd-localed'] + _ret, _out, _err = self.exec_cmd(cmd) + + def install_packages(self, pkglist): + # self.update_package_sources() + self.package_command('install', pkgs=pkglist) + + def _write_network_config(self, netconfig): + if isinstance(netconfig, str): + self._write_network_(netconfig) + return + return self._supported_write_network_config(netconfig) + + def _bring_up_interfaces(self, device_names): + cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'] + LOG.debug('Attempting to run bring up interfaces using command %s', + cmd) + ret, _out, _err = self.exec_cmd(cmd) + return ret + + def _write_hostname(self, hostname, out_fn): + conf = None + try: + # Try to update the previous one + # Let's see if we can read it first.
+ conf = HostnameConf(util.load_file(out_fn)) + conf.parse() + except IOError: + pass + if not conf: + conf = HostnameConf('') + conf.set_hostname(hostname) + util.write_file(out_fn, str(conf), mode=0o644) + + def _read_system_hostname(self): + sys_hostname = self._read_hostname(self.hostname_conf_fn) + return (self.hostname_conf_fn, sys_hostname) + + def _read_hostname(self, filename, default=None): + _ret, out, _err = self.exec_cmd(['hostname']) + + return out if out else default + + def _get_localhost_ip(self): + return '127.0.1.1' + + def set_timezone(self, tz): + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + cmd = ['tdnf', '-y'] + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + cmd.append(command) + + pkglist = util.expand_package_list('%s-%s', pkgs) + cmd.extend(pkglist) + + # Allow the output of this to flow outwards (ie not be captured) + _ret, _out, _err = self.exec_cmd(cmd, capture=False) + + def update_package_sources(self): + self._runner.run('update-sources', self.package_command, + ['makecache'], freq=PER_INSTANCE) + + def _generate_resolv_conf(self): + resolv_conf_fn = self.resolve_conf_fn + resolv_templ_fn = 'systemd.resolved.conf' + + return resolv_conf_fn, resolv_templ_fn + + def _write_network_(self, settings): + entries = net_util.translate_network(settings) + LOG.debug('Translated ubuntu style network settings %s into %s', + settings, entries) + route_entries = [] + route_entries = translate_routes(settings) + dev_names = entries.keys() + nameservers = [] + searchdomains = [] + # Format for systemd + for (dev, info) in entries.items(): + if 'dns-nameservers' in info: + nameservers.extend(info['dns-nameservers']) + if 'dns-search' in info: + searchdomains.extend(info['dns-search']) + if dev == 'lo': + continue + + net_fn = self.network_conf_dir + '10-cloud-init-' + dev + net_fn += '.network' + dhcp_enabled = 'no' + if info.get('bootproto') == 'dhcp': + if (settings.find('inet dhcp') >= 0 and + settings.find('inet6 dhcp') >= 0): + dhcp_enabled = 'yes' + else: + if info.get('inet6') is True: + dhcp_enabled = 'ipv6' + else: + dhcp_enabled = 'ipv4' + + net_cfg = { + 'Name': dev, + 'DHCP': dhcp_enabled, + } + + if info.get('hwaddress'): + net_cfg['MACAddress'] = info.get('hwaddress') + if info.get('address'): + net_cfg['Address'] = '%s' % (info.get('address')) + if info.get('netmask'): + net_cfg['Address'] += '/%s' % ( + mask_to_net_prefix(info.get('netmask'))) + if info.get('gateway'): + net_cfg['Gateway'] = info.get('gateway') + if info.get('dns-nameservers'): + net_cfg['DNS'] = str( + tuple(info.get('dns-nameservers'))).replace(',', '') + if info.get('dns-search'): + net_cfg['Domains'] = str( + tuple(info.get('dns-search'))).replace(',', '') + route_entry = [] + if dev in route_entries: + route_entry = route_entries[dev] + route_index = 0 + found = True + while found: + route_name = 'routes.' 
+ str(route_index) + if route_name in route_entries[dev]: + val = str(tuple(route_entries[dev][route_name])) + val = val.replace(',', '') + if val: + net_cfg[route_name] = val + else: + found = False + route_index += 1 + + if info.get('auto'): + self._write_interface_file(net_fn, net_cfg, route_entry) + + resolve_data = [] + new_resolve_data = [] + with open(self.resolve_conf_fn, 'r') as rf: + resolve_data = rf.readlines() + LOG.debug('Old Resolve Data\n') + LOG.debug('%s', resolve_data) + for item in resolve_data: + if ((nameservers and ('DNS=' in item)) or + (searchdomains and ('Domains=' in item))): + continue + else: + new_resolve_data.append(item) + + new_resolve_data = new_resolve_data + \ + convert_resolv_conf(nameservers, searchdomains) + LOG.debug('New resolve data\n') + LOG.debug('%s', new_resolve_data) + if nameservers or searchdomains: + util.write_file(self.resolve_conf_fn, ''.join(new_resolve_data)) + + return dev_names + + def _write_interface_file(self, net_fn, net_cfg, route_entry): + if not net_cfg['Name']: + return + content = '[Match]\n' + content += 'Name=%s\n' % (net_cfg['Name']) + if 'MACAddress' in net_cfg: + content += 'MACAddress=%s\n' % (net_cfg['MACAddress']) + content += '[Network]\n' + + if 'DHCP' in net_cfg and net_cfg['DHCP'] in {'yes', 'ipv4', 'ipv6'}: + content += 'DHCP=%s\n' % (net_cfg['DHCP']) + else: + if 'Address' in net_cfg: + content += 'Address=%s\n' % (net_cfg['Address']) + if 'Gateway' in net_cfg: + content += 'Gateway=%s\n' % (net_cfg['Gateway']) + if 'DHCP' in net_cfg and net_cfg['DHCP'] == 'no': + content += 'DHCP=%s\n' % (net_cfg['DHCP']) + + route_index = 0 + found = True + if route_entry: + while found: + route_name = 'routes.' + str(route_index) + if route_name in route_entry: + content += '[Route]\n' + if len(route_entry[route_name]) != 2: + continue + content += 'Gateway=%s\n' % ( + route_entry[route_name][0]) + content += 'Destination=%s\n' % ( + route_entry[route_name][1]) + else: + found = False + route_index += 1 + + util.write_file(net_fn, content) + + +def convert_resolv_conf(nameservers, searchdomains): + ''' Returns a string formatted for resolv.conf ''' + result = [] + if nameservers: + nslist = 'DNS=' + for ns in nameservers: + nslist = nslist + '%s ' % ns + nslist = nslist + '\n' + result.append(str(nslist)) + if searchdomains: + sdlist = 'Domains=' + for sd in searchdomains: + sdlist = sdlist + '%s ' % sd + sdlist = sdlist + '\n' + result.append(str(sdlist)) + return result + + +def translate_routes(settings): + entries = [] + for line in settings.splitlines(): + line = line.strip() + if not line or line.startswith('#'): + continue + split_up = line.split(None, 1) + if len(split_up) <= 1: + continue + entries.append(split_up) + consume = {} + ifaces = [] + for (cmd, args) in entries: + if cmd == 'iface': + if consume: + ifaces.append(consume) + consume = {} + consume[cmd] = args + else: + consume[cmd] = args + + absorb = False + for (cmd, args) in consume.items(): + if cmd == 'iface': + absorb = True + if absorb: + ifaces.append(consume) + out_ifaces = {} + for info in ifaces: + if 'iface' not in info: + continue + iface_details = info['iface'].split(None) + dev_name = None + if len(iface_details) >= 1: + dev = iface_details[0].strip().lower() + if dev: + dev_name = dev + if not dev_name: + continue + route_info = {} + route_index = 0 + found = True + while found: + route_name = 'routes.' 
+ str(route_index) + if route_name in info: + val = info[route_name].split() + if val: + route_info[route_name] = val + else: + found = False + route_index += 1 + if dev_name in out_ifaces: + out_ifaces[dev_name].update(route_info) + else: + out_ifaces[dev_name] = route_info + return out_ifaces diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 0c00a531..80a6f1d8 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -36,7 +36,6 @@ class Distro(distros.Distro): hostname_conf_fn = "/etc/sysconfig/network" systemd_hostname_conf_fn = "/etc/hostname" network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s' - resolve_conf_fn = "/etc/resolv.conf" tz_local_fn = "/etc/localtime" usr_lib_exec = "/usr/libexec" renderer_configs = { diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py new file mode 100644 index 00000000..71f87995 --- /dev/null +++ b/cloudinit/net/networkd.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python3 +# vi: ts=4 expandtab +# +# Copyright (C) 2021 VMware Inc. +# +# Author: Shreenidhi Shedi +# +# This file is part of cloud-init. See LICENSE file for license information. + +import os + + +from . import renderer +from cloudinit import util +from cloudinit import subp +from cloudinit import log as logging +from collections import OrderedDict + +LOG = logging.getLogger(__name__) + + +class CfgParser: + def __init__(self): + self.conf_dict = OrderedDict({ + 'Match': [], + 'Link': [], + 'Network': [], + 'DHCPv4': [], + 'DHCPv6': [], + 'Address': [], + 'Route': [], + }) + + def update_section(self, sec, key, val): + for k in self.conf_dict.keys(): + if k == sec: + self.conf_dict[k].append(key+'='+str(val)) + self.conf_dict[k].sort() + + def get_final_conf(self): + contents = '' + for k, v in self.conf_dict.items(): + if not v: + continue + contents += '['+k+']\n' + for e in v: + contents += e + '\n' + contents += '\n' + + return contents + + def dump_data(self, target_fn): + if not target_fn: + LOG.warning('Target file not given') + return + + contents = self.get_final_conf() + LOG.debug('Final content: %s', contents) + util.write_file(target_fn, contents) + + +class Renderer(renderer.Renderer): + """ + Renders network information in /etc/systemd/network + + This Renderer is currently experimental and doesn't support all the + use cases supported by the other renderers yet. 
+ """ + + def __init__(self, config=None): + if not config: + config = {} + self.resolved_conf = config.get('resolved_conf_fn', + '/etc/systemd/resolved.conf') + self.network_conf_dir = config.get('network_conf_dir', + '/etc/systemd/network/') + + def generate_match_section(self, iface, cfg): + sec = 'Match' + match_dict = { + 'name': 'Name', + 'driver': 'Driver', + 'mac_address': 'MACAddress' + } + + if not iface: + return + + for k, v in match_dict.items(): + if k in iface and iface[k]: + cfg.update_section(sec, v, iface[k]) + + return iface['name'] + + def generate_link_section(self, iface, cfg): + sec = 'Link' + + if not iface: + return + + if 'mtu' in iface and iface['mtu']: + cfg.update_section(sec, 'MTUBytes', iface['mtu']) + + def parse_routes(self, conf, cfg): + sec = 'Route' + for k, v in conf.items(): + if k == 'gateway': + cfg.update_section(sec, 'Gateway', v) + elif k == 'network': + tmp = v + if 'prefix' in conf: + tmp += '/' + str(conf['prefix']) + cfg.update_section(sec, 'Destination', tmp) + elif k == 'metric': + cfg.update_section(sec, 'Metric', v) + + def parse_subnets(self, iface, cfg): + dhcp = 'no' + for e in iface.get('subnets', []): + t = e['type'] + if t == 'dhcp4' or t == 'dhcp': + if dhcp == 'no': + dhcp = 'ipv4' + elif dhcp == 'ipv6': + dhcp = 'yes' + elif t == 'dhcp6': + if dhcp == 'no': + dhcp = 'ipv6' + elif dhcp == 'ipv4': + dhcp = 'yes' + if 'routes' in e and e['routes']: + for i in e['routes']: + self.parse_routes(i, cfg) + elif 'address' in e: + for k, v in e.items(): + if k == 'address': + tmp = v + if 'prefix' in e: + tmp += '/' + str(e['prefix']) + cfg.update_section('Address', 'Address', tmp) + elif k == 'gateway': + cfg.update_section('Route', 'Gateway', v) + elif k == 'dns_nameservers': + cfg.update_section('Network', 'DNS', ' '.join(v)) + elif k == 'dns_search': + cfg.update_section('Network', 'Domains', ' '.join(v)) + + cfg.update_section('Network', 'DHCP', dhcp) + + # This is to accommodate extra keys present in VMware config + def dhcp_domain(self, d, cfg): + for item in ['dhcp4domain', 'dhcp6domain']: + if item not in d: + continue + ret = str(d[item]).casefold() + try: + ret = util.translate_bool(ret) + ret = 'yes' if ret else 'no' + except ValueError: + if ret != 'route': + LOG.warning('Invalid dhcp4domain value - %s', ret) + ret = 'no' + if item == 'dhcp4domain': + section = 'DHCPv4' + else: + section = 'DHCPv6' + cfg.update_section(section, 'UseDomains', ret) + + def parse_dns(self, iface, cfg, ns): + sec = 'Network' + + dns_cfg_map = { + 'search': 'Domains', + 'nameservers': 'DNS', + 'addresses': 'DNS', + } + + dns = iface.get('dns') + if not dns and ns.version == 1: + dns = { + 'search': ns.dns_searchdomains, + 'nameservers': ns.dns_nameservers, + } + elif not dns and ns.version == 2: + return + + for k, v in dns_cfg_map.items(): + if k in dns and dns[k]: + cfg.update_section(sec, v, ' '.join(dns[k])) + + def create_network_file(self, link, conf, nwk_dir): + net_fn_owner = 'systemd-network' + + LOG.debug('Setting Networking Config for %s', link) + + net_fn = nwk_dir + '10-cloud-init-' + link + '.network' + util.write_file(net_fn, conf) + util.chownbyname(net_fn, net_fn_owner, net_fn_owner) + + def render_network_state(self, network_state, templates=None, target=None): + fp_nwkd = self.network_conf_dir + if target: + fp_nwkd = subp.target_path(target) + fp_nwkd + + util.ensure_dir(os.path.dirname(fp_nwkd)) + + ret_dict = self._render_content(network_state) + for k, v in ret_dict.items(): + self.create_network_file(k, v, fp_nwkd) + + def 
_render_content(self, ns): + ret_dict = {} + for iface in ns.iter_interfaces(): + cfg = CfgParser() + + link = self.generate_match_section(iface, cfg) + self.generate_link_section(iface, cfg) + self.parse_subnets(iface, cfg) + self.parse_dns(iface, cfg, ns) + + for route in ns.iter_routes(): + self.parse_routes(route, cfg) + + if ns.version == 2: + name = iface['name'] + # network state doesn't give dhcp domain info + # using ns.config as a workaround here + self.dhcp_domain(ns.config['ethernets'][name], cfg) + + ret_dict.update({link: cfg.get_final_conf()}) + + return ret_dict + + +def available(target=None): + expected = ['systemctl'] + search = ['/usr/bin', '/bin'] + for p in expected: + if not subp.which(p, search=search, target=target): + return False + return True + + +def network_state_to_networkd(ns): + renderer = Renderer({}) + return renderer._render_content(ns) diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py index e2de4d55..c3931a98 100644 --- a/cloudinit/net/renderers.py +++ b/cloudinit/net/renderers.py @@ -4,6 +4,7 @@ from . import eni from . import freebsd from . import netbsd from . import netplan +from . import networkd from . import RendererNotFoundError from . import openbsd from . import sysconfig @@ -13,12 +14,13 @@ NAME_TO_RENDERER = { "freebsd": freebsd, "netbsd": netbsd, "netplan": netplan, + "networkd": networkd, "openbsd": openbsd, "sysconfig": sysconfig, } DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd", - "netbsd", "openbsd"] + "netbsd", "openbsd", "networkd"] def search(priority=None, target=None, first=False): diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index f9bc31be..a1ccb1dc 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -177,6 +177,17 @@ OS_RELEASE_UBUNTU = dedent("""\ UBUNTU_CODENAME=xenial\n """) +OS_RELEASE_PHOTON = ("""\ + NAME="VMware Photon OS" + VERSION="4.0" + ID=photon + VERSION_ID=4.0 + PRETTY_NAME="VMware Photon OS/Linux" + ANSI_COLOR="1;34" + HOME_URL="https://vmware.github.io/photon/" + BUG_REPORT_URL="https://github.com/vmware/photon/issues" +""") + class FakeCloud(object): @@ -609,6 +620,15 @@ class TestGetLinuxDistro(CiTestCase): self.assertEqual( ('opensuse-tumbleweed', '20180920', platform.machine()), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_photon_os_release(self, m_os_release, m_path_exists): + """Verify we get the correct name and machine arch on PhotonOS""" + m_os_release.return_value = OS_RELEASE_PHOTON + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual( + ('photon', '4.0', 'VMware Photon OS/Linux'), dist) + @mock.patch('platform.system') @mock.patch('platform.dist', create=True) def test_get_linux_distro_no_data(self, m_platform_dist, diff --git a/cloudinit/util.py b/cloudinit/util.py index f95dc435..7995c6c8 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -483,6 +483,8 @@ def get_linux_distro(): # which will include both version codename and architecture # on all distributions. 
flavor = platform.machine() + elif distro_name == 'photon': + flavor = os_release.get('PRETTY_NAME', '') else: flavor = os_release.get('VERSION_CODENAME', '') if not flavor: @@ -531,7 +533,7 @@ def system_info(): linux_dist = info['dist'][0].lower() if linux_dist in ( 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora', - 'rhel', 'rocky', 'suse'): + 'photon', 'rhel', 'rocky', 'suse'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 586384e4..d6dbb833 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -11,11 +11,21 @@ syslog_fix_perms: root:root # when a 'default' entry is found it will reference the 'default_user' # from the distro configuration specified below users: +{% if variant in ["photon"] %} + - name: root + lock_passwd: false +{% else %} - default +{% endif %} + +# VMware guest customization. +{% if variant in ["photon"] %} +disable_vmware_customization: true +{% endif %} # If this is set, 'root' will not be able to ssh in and they # will get a message to login instead as the default $user -{% if variant in ["freebsd"] %} +{% if variant in ["freebsd", "photon"] %} disable_root: false {% else %} disable_root: true @@ -38,6 +48,16 @@ preserve_hostname: false # This should not be required, but leave it in place until the real cause of # not finding -any- datasources is resolved. datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] +{% elif variant in ["photon"] %} +# Datasources to check for cloud-config +datasource_list: [ + NoCloud, + ConfigDrive, + OVF, + OpenStack, + VMwareGuestInfo, + None + ] {% endif %} # Example datasource config # datasource: @@ -72,11 +92,13 @@ cloud_init_modules: - set_hostname - update_hostname - update_etc_hosts -{% if variant in ["alpine"] %} +{% if variant in ["alpine", "photon"] %} - resolv_conf {% endif %} {% if not variant.endswith("bsd") %} +{% if variant not in ["photon"] %} - ca-certs +{% endif %} - rsyslog {% endif %} - users-groups @@ -90,11 +112,15 @@ cloud_config_modules: - emit_upstart - snap {% endif %} +{% if variant not in ["photon"] %} - ssh-import-id - locale +{% endif %} - set-passwords -{% if variant in ["rhel", "fedora"] %} +{% if variant in ["rhel", "fedora", "photon"] %} +{% if variant not in ["photon"] %} - spacewalk +{% endif %} - yum-add-repo {% endif %} {% if variant in ["ubuntu", "unknown", "debian"] %} @@ -155,8 +181,8 @@ cloud_final_modules: system_info: # This will affect which distro class gets used {% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian", - "fedora", "freebsd", "netbsd", "openbsd", "rhel", "rocky", - "suse", "ubuntu"] %} + "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", + "rocky", "suse", "ubuntu"] %} distro: {{ variant }} {% elif variant in ["dragonfly"] %} distro: dragonflybsd @@ -276,6 +302,22 @@ system_info: groups: [wheel] sudo: ["ALL=(ALL) NOPASSWD:ALL"] shell: /bin/ksh +{% elif variant == "photon" %} + default_user: + name: photon + lock_passwd: True + gecos: PhotonOS + groups: [wheel] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/bash + # Other config here will be given to the distro class and/or path classes + paths: + cloud_dir: /var/lib/cloud/ + templates_dir: /etc/cloud/templates/ + + ssh_svcname: sshd + +#manage_etc_hosts: true {% endif %} {% if variant in ["freebsd", "netbsd", "openbsd"] %} network: diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index 4da1a905..c773e411 100644 --- 
a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -1,7 +1,9 @@ ## template:jinja [Unit] Description=Initial cloud-init job (metadata service crawler) +{% if variant not in ["photon"] %} DefaultDependencies=no +{% endif %} Wants=cloud-init-local.service Wants=sshd-keygen.service Wants=sshd.service diff --git a/templates/chrony.conf.photon.tmpl b/templates/chrony.conf.photon.tmpl new file mode 100644 index 00000000..8551f793 --- /dev/null +++ b/templates/chrony.conf.photon.tmpl @@ -0,0 +1,48 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Enable hardware timestamping on all interfaces that support it. +#hwtimestamp * + +# Increase the minimum number of selectable sources required to adjust +# the system clock. +#minsources 2 + +# Allow NTP client access from local network. +#allow 192.168.0.0/16 + +# Serve time even if not synchronized to a time source. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Get TAI-UTC offset and leap seconds from the system tz database. +leapsectz right/UTC + +# Specify directory for log files. +logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/hosts.photon.tmpl b/templates/hosts.photon.tmpl new file mode 100644 index 00000000..0fd6f722 --- /dev/null +++ b/templates/hosts.photon.tmpl @@ -0,0 +1,22 @@ +## template:jinja +{# +This file /etc/cloud/templates/hosts.photon.tmpl is only utilized +if enabled in cloud-config. Specifically, in order to enable it +you need to add the following to config: + manage_etc_hosts: True +-#} +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.photon.tmpl +# b.) change or remove the value of 'manage_etc_hosts' in +# /etc/cloud/cloud.cfg or cloud-config from user-data +# +# The following lines are desirable for IPv4 capable hosts +127.0.0.1 {{fqdn}} {{hostname}} +127.0.0.1 localhost.localdomain localhost +127.0.0.1 localhost4.localdomain4 localhost4 + +# The following lines are desirable for IPv6 capable hosts +::1 {{fqdn}} {{hostname}} +::1 localhost6.localdomain6 localhost6 diff --git a/templates/ntp.conf.photon.tmpl b/templates/ntp.conf.photon.tmpl new file mode 100644 index 00000000..4d4910d1 --- /dev/null +++ b/templates/ntp.conf.photon.tmpl @@ -0,0 +1,61 @@ +## template:jinja + +# For more information about this file, see the man pages +# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). + +driftfile /var/lib/ntp/drift + +# Permit time synchronization with our time source, but do not +# permit the source to query or modify the service on this system. 
+restrict default kod nomodify notrap nopeer noquery +restrict -6 default kod nomodify notrap nopeer noquery + +# Permit all access over the loopback interface. This could +# be tightened as well, but to do so would effect some of +# the administrative functions. +restrict 127.0.0.1 +restrict -6 ::1 + +# Hosts on local network are less restricted. +#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap + +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +#broadcast 192.168.1.255 autokey # broadcast server +#broadcastclient # broadcast client +#broadcast 224.0.1.1 autokey # multicast server +#multicastclient 224.0.1.1 # multicast client +#manycastserver 239.255.254.254 # manycast server +#manycastclient 239.255.254.254 autokey # manycast client + +# Enable public key cryptography. +#crypto + +includefile /etc/ntp/crypto/pw + +# Key file containing the keys and key identifiers used when operating +# with symmetric key cryptography. +keys /etc/ntp/keys + +# Specify the key identifiers which are trusted. +#trustedkey 4 8 42 + +# Specify the key identifier to use with the ntpdc utility. +#requestkey 8 + +# Specify the key identifier to use with the ntpq utility. +#controlkey 8 + +# Enable writing of statistics records. +#statistics clockstats cryptostats loopstats peerstats diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl index f870be67..72a37bf7 100644 --- a/templates/resolv.conf.tmpl +++ b/templates/resolv.conf.tmpl @@ -22,7 +22,7 @@ domain {{domain}} sortlist {% for sort in sortlist %}{{sort}} {% endfor %} {% endif %} {# - Flags and options are required to be on the + Flags and options are required to be on the same line preceded by "options" keyword #} {% if options or flags %} diff --git a/templates/systemd.resolved.conf.tmpl b/templates/systemd.resolved.conf.tmpl new file mode 100644 index 00000000..fca50d37 --- /dev/null +++ b/templates/systemd.resolved.conf.tmpl @@ -0,0 +1,15 @@ +## template:jinja +# Your system has been configured with 'manage-resolv-conf' set to true. +# As a result, cloud-init has written this file with configuration data +# that it has been provided. Cloud-init, by default, will write this file +# a single time (PER_ONCE). 
+# +[Resolve] +LLMNR=false +{% if nameservers is defined %} +DNS={% for server in nameservers %}{{server}} {% endfor %} +{% endif %} + +{% if searchdomains is defined %} +Domains={% for search in searchdomains %}{{search}} {% endfor %} +{% endif %} diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py index 7dcccbdd..49baadb0 100644 --- a/tests/cloud_tests/util.py +++ b/tests/cloud_tests/util.py @@ -23,7 +23,7 @@ from tests.cloud_tests import LOG OS_FAMILY_MAPPING = { 'debian': ['debian', 'ubuntu'], - 'redhat': ['centos', 'rhel', 'fedora'], + 'redhat': ['centos', 'photon', 'rhel', 'fedora'], 'gentoo': ['gentoo'], 'freebsd': ['freebsd'], 'suse': ['sles'], diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index fbc6ec11..fdb4026c 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -225,7 +225,7 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): expected_doc_sections = [ '**Supported distros:** all', ('**Supported distros:** almalinux, alpine, centos, debian, ' - 'fedora, opensuse, rhel, rocky, sles, ubuntu'), + 'fedora, opensuse, photon, rhel, rocky, sles, ubuntu'), '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' ] diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index a1df066a..562ee04a 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -2,6 +2,7 @@ import copy import os +import re from io import StringIO from textwrap import dedent from unittest import mock @@ -15,7 +16,6 @@ from cloudinit.tests.helpers import ( from cloudinit import subp from cloudinit import util - BASE_NET_CFG = ''' auto lo iface lo inet loopback @@ -771,6 +771,103 @@ class TestNetCfgDistroArch(TestNetCfgDistroBase): with_netplan=True) +class TestNetCfgDistroPhoton(TestNetCfgDistroBase): + + def setUp(self): + super(TestNetCfgDistroPhoton, self).setUp() + self.distro = self._get_distro('photon', renderers=['networkd']) + + def create_conf_dict(self, contents): + content_dict = {} + for line in contents: + if line: + line = line.strip() + if line and re.search(r'^\[(.+)\]$', line): + content_dict[line] = [] + key = line + elif line: + assert key + content_dict[key].append(line) + + return content_dict + + def compare_dicts(self, actual, expected): + for k, v in actual.items(): + self.assertEqual(sorted(expected[k]), sorted(v)) + + def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, + bringup=False): + if not expected_cfgs: + raise ValueError('expected_cfg must not be None') + + tmpd = None + with mock.patch('cloudinit.net.networkd.available') as m_avail: + m_avail.return_value = True + with self.reRooted(tmpd) as tmpd: + apply_fn(config, bringup) + + results = dir2dict(tmpd) + for cfgpath, expected in expected_cfgs.items(): + actual = self.create_conf_dict(results[cfgpath].splitlines()) + self.compare_dicts(actual, expected) + self.assertEqual(0o644, get_mode(cfgpath, tmpd)) + + def nwk_file_path(self, ifname): + return '/etc/systemd/network/10-cloud-init-%s.network' % ifname + + def net_cfg_1(self, ifname): + ret = """\ + [Match] + Name=%s + [Network] + DHCP=no + [Address] + Address=192.168.1.5/24 + [Route] + Gateway=192.168.1.254""" % ifname + return ret + + def net_cfg_2(self, ifname): + ret = """\ + [Match] + Name=%s + [Network] + DHCP=ipv4""" % ifname + return ret + + def test_photon_network_config_v1(self): + tmp = 
self.net_cfg_1('eth0').splitlines() + expected_eth0 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2('eth1').splitlines() + expected_eth1 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path('eth0'): expected_eth0, + self.nwk_file_path('eth1'): expected_eth1, + } + + self._apply_and_verify(self.distro.apply_network_config, + V1_NET_CFG, + expected_cfgs.copy()) + + def test_photon_network_config_v2(self): + tmp = self.net_cfg_1('eth7').splitlines() + expected_eth7 = self.create_conf_dict(tmp) + + tmp = self.net_cfg_2('eth9').splitlines() + expected_eth9 = self.create_conf_dict(tmp) + + expected_cfgs = { + self.nwk_file_path('eth7'): expected_eth7, + self.nwk_file_path('eth9'): expected_eth9, + } + + self._apply_and_verify(self.distro.apply_network_config, + V2_NET_CFG, + expected_cfgs.copy()) + + def get_mode(path, target=None): return os.stat(subp.target_path(target, path)).st_mode & 0o777 diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index 73641b70..32ca3b7e 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -120,6 +120,32 @@ class TestHostname(t_help.FilesystemMockingTestCase): contents = util.load_file(distro.hostname_conf_fn) self.assertEqual('blah', contents.strip()) + @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) + def test_photon_hostname(self, m_uses_systemd): + cfg1 = { + 'hostname': 'photon', + 'prefer_fqdn_over_hostname': True, + 'fqdn': 'test1.vmware.com', + } + cfg2 = { + 'hostname': 'photon', + 'prefer_fqdn_over_hostname': False, + 'fqdn': 'test2.vmware.com', + } + + ds = None + distro = self._fetch_distro('photon', cfg1) + paths = helpers.Paths({'cloud_dir': self.tmp}) + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + for c in [cfg1, cfg2]: + cc_set_hostname.handle('cc_set_hostname', c, cc, LOG, []) + contents = util.load_file(distro.hostname_conf_fn, decode=True) + if c['prefer_fqdn_over_hostname']: + self.assertEqual(contents.strip(), c['fqdn']) + else: + self.assertEqual(contents.strip(), c['hostname']) + def test_multiple_calls_skips_unchanged_hostname(self): """Only new hostname or fqdn values will generate a hostname call.""" distro = self._fetch_distro('debian') diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b72a62b8..b2ddbf99 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -5,7 +5,7 @@ from cloudinit import distros from cloudinit.net import cmdline from cloudinit.net import ( eni, interface_has_own_mac, natural_sort_key, netplan, network_state, - renderers, sysconfig) + renderers, sysconfig, networkd) from cloudinit.sources.helpers import openstack from cloudinit import temp_utils from cloudinit import subp @@ -821,6 +821,28 @@ iface eth1 inet static NETWORK_CONFIGS = { 'small': { + 'expected_networkd_eth99': textwrap.dedent("""\ + [Match] + Name=eth99 + MACAddress=c0:d6:9f:2c:e8:80 + [Network] + DHCP=ipv4 + Domains=wark.maas + DNS=1.2.3.4 5.6.7.8 + [Route] + Gateway=65.61.151.37 + Destination=0.0.0.0/0 + Metric=10000 + """).rstrip(' '), + 'expected_networkd_eth1': textwrap.dedent("""\ + [Match] + Name=eth1 + MACAddress=cf:d6:af:48:e8:80 + [Network] + DHCP=no + Domains=wark.maas + DNS=1.2.3.4 5.6.7.8 + """).rstrip(' '), 'expected_eni': textwrap.dedent("""\ auto lo iface lo inet loopback @@ -938,6 +960,12 @@ NETWORK_CONFIGS = { """), }, 'v4_and_v6': { + 
'expected_networkd': textwrap.dedent("""\ + [Match] + Name=iface0 + [Network] + DHCP=yes + """).rstrip(' '), 'expected_eni': textwrap.dedent("""\ auto lo iface lo inet loopback @@ -973,6 +1001,17 @@ NETWORK_CONFIGS = { """).rstrip(' '), }, 'v4_and_v6_static': { + 'expected_networkd': textwrap.dedent("""\ + [Match] + Name=iface0 + [Link] + MTUBytes=8999 + [Network] + DHCP=no + [Address] + Address=192.168.14.2/24 + Address=2001:1::1/64 + """).rstrip(' '), 'expected_eni': textwrap.dedent("""\ auto lo iface lo inet loopback @@ -1059,6 +1098,12 @@ NETWORK_CONFIGS = { """).rstrip(' '), }, 'dhcpv6_only': { + 'expected_networkd': textwrap.dedent("""\ + [Match] + Name=iface0 + [Network] + DHCP=ipv6 + """).rstrip(' '), 'expected_eni': textwrap.dedent("""\ auto lo iface lo inet loopback @@ -4986,26 +5031,199 @@ class TestEniRoundTrip(CiTestCase): files['/etc/network/interfaces'].splitlines()) +class TestNetworkdNetRendering(CiTestCase): + + def create_conf_dict(self, contents): + content_dict = {} + for line in contents: + if line: + line = line.strip() + if line and re.search(r'^\[(.+)\]$', line): + content_dict[line] = [] + key = line + elif line: + content_dict[key].append(line) + + return content_dict + + def compare_dicts(self, actual, expected): + for k, v in actual.items(): + self.assertEqual(sorted(expected[k]), sorted(v)) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + @mock.patch("cloudinit.net.util.get_cmdline", return_value="root=myroot") + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_networkd_default_generation(self, mock_get_devicelist, + mock_read_sys_net, + mock_sys_dev_path, + m_get_cmdline, + m_chown): + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path) + + network_cfg = net.generate_fallback_config() + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + + render_dir = os.path.join(tmp_dir, "render") + os.makedirs(render_dir) + + render_target = 'etc/systemd/network/10-cloud-init-eth1000.network' + renderer = networkd.Renderer({}) + renderer.render_network_state(ns, target=render_dir) + + self.assertTrue(os.path.exists(os.path.join(render_dir, + render_target))) + with open(os.path.join(render_dir, render_target)) as fh: + contents = fh.readlines() + + actual = self.create_conf_dict(contents) + print(actual) + + expected = textwrap.dedent("""\ + [Match] + Name=eth1000 + MACAddress=07-1c-c6-75-a4-be + [Network] + DHCP=ipv4""").rstrip(' ') + + expected = self.create_conf_dict(expected.splitlines()) + + self.compare_dicts(actual, expected) + + +class TestNetworkdRoundTrip(CiTestCase): + + def create_conf_dict(self, contents): + content_dict = {} + for line in contents: + if line: + line = line.strip() + if line and re.search(r'^\[(.+)\]$', line): + content_dict[line] = [] + key = line + elif line: + content_dict[key].append(line) + + return content_dict + + def compare_dicts(self, actual, expected): + for k, v in actual.items(): + self.assertEqual(sorted(expected[k]), sorted(v)) + + def _render_and_read(self, network_config=None, state=None, nwkd_path=None, + dir=None): + if dir is None: + dir = self.tmp_dir() + + if network_config: + ns = network_state.parse_net_config_data(network_config) + elif state: + ns = state + else: + raise ValueError("Expected data or state, got neither") + + if not nwkd_path: + nwkd_path = '/etc/systemd/network/' + + renderer = 
networkd.Renderer(config={'network_conf_dir': nwkd_path}) + + renderer.render_network_state(ns, target=dir) + return dir2dict(dir) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def testsimple_render_small_networkd(self, m_chown): + nwk_fn1 = '/etc/systemd/network/10-cloud-init-eth99.network' + nwk_fn2 = '/etc/systemd/network/10-cloud-init-eth1.network' + entry = NETWORK_CONFIGS['small'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + + actual = files[nwk_fn1].splitlines() + actual = self.create_conf_dict(actual) + + expected = entry['expected_networkd_eth99'].splitlines() + expected = self.create_conf_dict(expected) + + self.compare_dicts(actual, expected) + + actual = files[nwk_fn2].splitlines() + actual = self.create_conf_dict(actual) + + expected = entry['expected_networkd_eth1'].splitlines() + expected = self.create_conf_dict(expected) + + self.compare_dicts(actual, expected) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def testsimple_render_v4_and_v6(self, m_chown): + nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' + entry = NETWORK_CONFIGS['v4_and_v6'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + + actual = files[nwk_fn].splitlines() + actual = self.create_conf_dict(actual) + + expected = entry['expected_networkd'].splitlines() + expected = self.create_conf_dict(expected) + + self.compare_dicts(actual, expected) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def testsimple_render_v4_and_v6_static(self, m_chown): + nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' + entry = NETWORK_CONFIGS['v4_and_v6_static'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + + actual = files[nwk_fn].splitlines() + actual = self.create_conf_dict(actual) + + expected = entry['expected_networkd'].splitlines() + expected = self.create_conf_dict(expected) + + self.compare_dicts(actual, expected) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def testsimple_render_dhcpv6_only(self, m_chown): + nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' + entry = NETWORK_CONFIGS['dhcpv6_only'] + files = self._render_and_read(network_config=yaml.load(entry['yaml'])) + + actual = files[nwk_fn].splitlines() + actual = self.create_conf_dict(actual) + + expected = entry['expected_networkd'].splitlines() + expected = self.create_conf_dict(expected) + + self.compare_dicts(actual, expected) + + class TestRenderersSelect: @pytest.mark.parametrize( - 'renderer_selected,netplan,eni,nm,scfg,sys', ( + 'renderer_selected,netplan,eni,nm,scfg,sys,networkd', ( # -netplan -ifupdown -nm -scfg -sys raises error - (net.RendererNotFoundError, False, False, False, False, False), + (net.RendererNotFoundError, False, False, False, False, False, + False), # -netplan +ifupdown -nm -scfg -sys selects eni - ('eni', False, True, False, False, False), + ('eni', False, True, False, False, False, False), # +netplan +ifupdown -nm -scfg -sys selects eni - ('eni', True, True, False, False, False), + ('eni', True, True, False, False, False, False), # +netplan -ifupdown -nm -scfg -sys selects netplan - ('netplan', True, False, False, False, False), + ('netplan', True, False, False, False, False, False), # Ubuntu with Network-Manager installed # +netplan -ifupdown +nm -scfg -sys selects netplan - ('netplan', True, False, True, False, False), + ('netplan', True, False, True, False, False, False), # Centos/OpenSuse with Network-Manager installed 
selects sysconfig # -netplan -ifupdown +nm -scfg +sys selects netplan - ('sysconfig', False, False, True, False, True), + ('sysconfig', False, False, True, False, True, False), + # -netplan -ifupdown -nm -scfg -sys +networkd selects networkd + ('networkd', False, False, False, False, False, True), ), ) + @mock.patch("cloudinit.net.renderers.networkd.available") @mock.patch("cloudinit.net.renderers.netplan.available") @mock.patch("cloudinit.net.renderers.sysconfig.available") @mock.patch("cloudinit.net.renderers.sysconfig.available_sysconfig") @@ -5013,7 +5231,8 @@ class TestRenderersSelect: @mock.patch("cloudinit.net.renderers.eni.available") def test_valid_renderer_from_defaults_depending_on_availability( self, m_eni_avail, m_nm_avail, m_scfg_avail, m_sys_avail, - m_netplan_avail, renderer_selected, netplan, eni, nm, scfg, sys + m_netplan_avail, m_networkd_avail, renderer_selected, + netplan, eni, nm, scfg, sys, networkd ): """Assert proper renderer per DEFAULT_PRIORITY given availability.""" m_eni_avail.return_value = eni # ifupdown pkg presence @@ -5021,6 +5240,7 @@ m_scfg_avail.return_value = scfg # sysconfig presence m_sys_avail.return_value = sys # sysconfig/ifup/down presence m_netplan_avail.return_value = netplan # netplan presence + m_networkd_avail.return_value = networkd # networkd presence if isinstance(renderer_selected, str): (renderer_name, _rnd_class) = renderers.select( priority=renderers.DEFAULT_PRIORITY ) @@ -5094,6 +5314,12 @@ class TestNetRenderers(CiTestCase): result = sysconfig.available() self.assertTrue(result) + @mock.patch("cloudinit.net.renderers.networkd.available") + def test_networkd_available(self, m_nwkd_avail): + m_nwkd_avail.return_value = True + found = renderers.search(priority=['networkd'], first=False) + self.assertEqual('networkd', found[0][0]) + @mock.patch( "cloudinit.net.is_openvswitch_internal_interface", diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py index 495e2669..275879af 100644 --- a/tests/unittests/test_render_cloudcfg.py +++ b/tests/unittests/test_render_cloudcfg.py @@ -10,7 +10,8 @@ from cloudinit import util # TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES) DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", - "netbsd", "openbsd", "rhel", "suse", "ubuntu", "unknown"] + "netbsd", "openbsd", "photon", "rhel", "suse", "ubuntu", + "unknown"] @pytest.mark.allow_subp_for(sys.executable) diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 9ec554bd..7e667de4 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -5,8 +5,8 @@ import os import sys VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian", - "fedora", "freebsd", "netbsd", "openbsd", "rhel", "suse", "rocky", - "ubuntu", "unknown"] + "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", + "suse","rocky", "ubuntu", "unknown"] if "avoid-pep8-E402-import-not-top-of-file": -- cgit v1.2.3 From b5aecbe9512fa546255cc93b178b4081342fc247 Mon Sep 17 00:00:00 2001 From: sshedi <53473811+sshedi@users.noreply.github.com> Date: Tue, 29 Jun 2021 00:12:34 +0530 Subject: Removed distro specific network code from Photon (#929) Includes minor fixes in the networkd renderer and fixes to the corresponding tests. Removed datasource_list for Photon from cloud.cfg.tmpl and added a comment in cloud.cfg.tmpl noting that datasource_list must be kept on a single line, since ds-identify cannot read a multiline array.
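For illustration, renderer selection walks the priority list and keeps each backend whose available() check passes. A minimal sketch of what the new unit test exercises, assuming a host where systemd-networkd is the available backend:

    from cloudinit.net import renderers

    # search() yields (name, Renderer) pairs for each available backend,
    # in priority order; the new test asserts networkd is found this way.
    found = renderers.search(priority=['networkd'], first=False)
    assert found[0][0] == 'networkd'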
Signed-off-by: Shreenidhi Shedi --- cloudinit/distros/photon.py | 217 ------------------------- cloudinit/net/networkd.py | 51 +++--- config/cloud.cfg.tmpl | 12 +- tests/unittests/test_distros/test_netconfig.py | 41 +++++ tests/unittests/test_net.py | 4 + 5 files changed, 79 insertions(+), 246 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py index 8b78f98f..45125be7 100644 --- a/cloudinit/distros/photon.py +++ b/cloudinit/distros/photon.py @@ -10,10 +10,8 @@ from cloudinit import subp from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging -from cloudinit.distros import net_util from cloudinit.settings import PER_INSTANCE from cloudinit.distros import rhel_util as rhutil -from cloudinit.net.network_state import mask_to_net_prefix from cloudinit.distros.parsers.hostname import HostnameConf LOG = logging.getLogger(__name__) @@ -79,9 +77,6 @@ class Distro(distros.Distro): self.package_command('install', pkgs=pkglist) def _write_network_config(self, netconfig): - if isinstance(netconfig, str): - self._write_network_(netconfig) - return return self._supported_write_network_config(netconfig) def _bring_up_interfaces(self, device_names): @@ -141,215 +136,3 @@ class Distro(distros.Distro): def update_package_sources(self): self._runner.run('update-sources', self.package_command, ['makecache'], freq=PER_INSTANCE) - - def _generate_resolv_conf(self): - resolv_conf_fn = self.resolve_conf_fn - resolv_templ_fn = 'systemd.resolved.conf' - - return resolv_conf_fn, resolv_templ_fn - - def _write_network_(self, settings): - entries = net_util.translate_network(settings) - LOG.debug('Translated ubuntu style network settings %s into %s', - settings, entries) - route_entries = [] - route_entries = translate_routes(settings) - dev_names = entries.keys() - nameservers = [] - searchdomains = [] - # Format for systemd - for (dev, info) in entries.items(): - if 'dns-nameservers' in info: - nameservers.extend(info['dns-nameservers']) - if 'dns-search' in info: - searchdomains.extend(info['dns-search']) - if dev == 'lo': - continue - - net_fn = self.network_conf_dir + '10-cloud-init-' + dev - net_fn += '.network' - dhcp_enabled = 'no' - if info.get('bootproto') == 'dhcp': - if (settings.find('inet dhcp') >= 0 and - settings.find('inet6 dhcp') >= 0): - dhcp_enabled = 'yes' - else: - if info.get('inet6') is True: - dhcp_enabled = 'ipv6' - else: - dhcp_enabled = 'ipv4' - - net_cfg = { - 'Name': dev, - 'DHCP': dhcp_enabled, - } - - if info.get('hwaddress'): - net_cfg['MACAddress'] = info.get('hwaddress') - if info.get('address'): - net_cfg['Address'] = '%s' % (info.get('address')) - if info.get('netmask'): - net_cfg['Address'] += '/%s' % ( - mask_to_net_prefix(info.get('netmask'))) - if info.get('gateway'): - net_cfg['Gateway'] = info.get('gateway') - if info.get('dns-nameservers'): - net_cfg['DNS'] = str( - tuple(info.get('dns-nameservers'))).replace(',', '') - if info.get('dns-search'): - net_cfg['Domains'] = str( - tuple(info.get('dns-search'))).replace(',', '') - route_entry = [] - if dev in route_entries: - route_entry = route_entries[dev] - route_index = 0 - found = True - while found: - route_name = 'routes.' 
+ str(route_index) - if route_name in route_entries[dev]: - val = str(tuple(route_entries[dev][route_name])) - val = val.replace(',', '') - if val: - net_cfg[route_name] = val - else: - found = False - route_index += 1 - - if info.get('auto'): - self._write_interface_file(net_fn, net_cfg, route_entry) - - resolve_data = [] - new_resolve_data = [] - with open(self.resolve_conf_fn, 'r') as rf: - resolve_data = rf.readlines() - LOG.debug('Old Resolve Data\n') - LOG.debug('%s', resolve_data) - for item in resolve_data: - if ((nameservers and ('DNS=' in item)) or - (searchdomains and ('Domains=' in item))): - continue - else: - new_resolve_data.append(item) - - new_resolve_data = new_resolve_data + \ - convert_resolv_conf(nameservers, searchdomains) - LOG.debug('New resolve data\n') - LOG.debug('%s', new_resolve_data) - if nameservers or searchdomains: - util.write_file(self.resolve_conf_fn, ''.join(new_resolve_data)) - - return dev_names - - def _write_interface_file(self, net_fn, net_cfg, route_entry): - if not net_cfg['Name']: - return - content = '[Match]\n' - content += 'Name=%s\n' % (net_cfg['Name']) - if 'MACAddress' in net_cfg: - content += 'MACAddress=%s\n' % (net_cfg['MACAddress']) - content += '[Network]\n' - - if 'DHCP' in net_cfg and net_cfg['DHCP'] in {'yes', 'ipv4', 'ipv6'}: - content += 'DHCP=%s\n' % (net_cfg['DHCP']) - else: - if 'Address' in net_cfg: - content += 'Address=%s\n' % (net_cfg['Address']) - if 'Gateway' in net_cfg: - content += 'Gateway=%s\n' % (net_cfg['Gateway']) - if 'DHCP' in net_cfg and net_cfg['DHCP'] == 'no': - content += 'DHCP=%s\n' % (net_cfg['DHCP']) - - route_index = 0 - found = True - if route_entry: - while found: - route_name = 'routes.' + str(route_index) - if route_name in route_entry: - content += '[Route]\n' - if len(route_entry[route_name]) != 2: - continue - content += 'Gateway=%s\n' % ( - route_entry[route_name][0]) - content += 'Destination=%s\n' % ( - route_entry[route_name][1]) - else: - found = False - route_index += 1 - - util.write_file(net_fn, content) - - -def convert_resolv_conf(nameservers, searchdomains): - ''' Returns a string formatted for resolv.conf ''' - result = [] - if nameservers: - nslist = 'DNS=' - for ns in nameservers: - nslist = nslist + '%s ' % ns - nslist = nslist + '\n' - result.append(str(nslist)) - if searchdomains: - sdlist = 'Domains=' - for sd in searchdomains: - sdlist = sdlist + '%s ' % sd - sdlist = sdlist + '\n' - result.append(str(sdlist)) - return result - - -def translate_routes(settings): - entries = [] - for line in settings.splitlines(): - line = line.strip() - if not line or line.startswith('#'): - continue - split_up = line.split(None, 1) - if len(split_up) <= 1: - continue - entries.append(split_up) - consume = {} - ifaces = [] - for (cmd, args) in entries: - if cmd == 'iface': - if consume: - ifaces.append(consume) - consume = {} - consume[cmd] = args - else: - consume[cmd] = args - - absorb = False - for (cmd, args) in consume.items(): - if cmd == 'iface': - absorb = True - if absorb: - ifaces.append(consume) - out_ifaces = {} - for info in ifaces: - if 'iface' not in info: - continue - iface_details = info['iface'].split(None) - dev_name = None - if len(iface_details) >= 1: - dev = iface_details[0].strip().lower() - if dev: - dev_name = dev - if not dev_name: - continue - route_info = {} - route_index = 0 - found = True - while found: - route_name = 'routes.' 
+ str(route_index) - if route_name in info: - val = info[route_name].split() - if val: - route_info[route_name] = val - else: - found = False - route_index += 1 - if dev_name in out_ifaces: - out_ifaces[dev_name].update(route_info) - else: - out_ifaces[dev_name] = route_info - return out_ifaces diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py index 71f87995..2dffce59 100644 --- a/cloudinit/net/networkd.py +++ b/cloudinit/net/networkd.py @@ -35,6 +35,8 @@ class CfgParser: for k in self.conf_dict.keys(): if k == sec: self.conf_dict[k].append(key+'='+str(val)) + # remove duplicates from list + self.conf_dict[k] = list(dict.fromkeys(self.conf_dict[k])) self.conf_dict[k].sort() def get_final_conf(self): @@ -103,19 +105,27 @@ class Renderer(renderer.Renderer): def parse_routes(self, conf, cfg): sec = 'Route' + route_cfg_map = { + 'gateway': 'Gateway', + 'network': 'Destination', + 'metric': 'Metric', + } + + # prefix is derived using netmask by network_state + prefix = '' + if 'prefix' in conf: + prefix = '/' + str(conf['prefix']) + for k, v in conf.items(): - if k == 'gateway': - cfg.update_section(sec, 'Gateway', v) - elif k == 'network': - tmp = v - if 'prefix' in conf: - tmp += '/' + str(conf['prefix']) - cfg.update_section(sec, 'Destination', tmp) - elif k == 'metric': - cfg.update_section(sec, 'Metric', v) + if k not in route_cfg_map: + continue + if k == 'network': + v += prefix + cfg.update_section(sec, route_cfg_map[k], v) def parse_subnets(self, iface, cfg): dhcp = 'no' + sec = 'Network' for e in iface.get('subnets', []): t = e['type'] if t == 'dhcp4' or t == 'dhcp': @@ -131,21 +141,24 @@ class Renderer(renderer.Renderer): if 'routes' in e and e['routes']: for i in e['routes']: self.parse_routes(i, cfg) - elif 'address' in e: + if 'address' in e: + subnet_cfg_map = { + 'address': 'Address', + 'gateway': 'Gateway', + 'dns_nameservers': 'DNS', + 'dns_search': 'Domains', + } for k, v in e.items(): if k == 'address': - tmp = v if 'prefix' in e: - tmp += '/' + str(e['prefix']) - cfg.update_section('Address', 'Address', tmp) + v += '/' + str(e['prefix']) + cfg.update_section('Address', subnet_cfg_map[k], v) elif k == 'gateway': - cfg.update_section('Route', 'Gateway', v) - elif k == 'dns_nameservers': - cfg.update_section('Network', 'DNS', ' '.join(v)) - elif k == 'dns_search': - cfg.update_section('Network', 'Domains', ' '.join(v)) + cfg.update_section('Route', subnet_cfg_map[k], v) + elif k == 'dns_nameservers' or k == 'dns_search': + cfg.update_section(sec, subnet_cfg_map[k], ' '.join(v)) - cfg.update_section('Network', 'DHCP', dhcp) + cfg.update_section(sec, 'DHCP', dhcp) # This is to accommodate extra keys present in VMware config def dhcp_domain(self, d, cfg): diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index d6dbb833..cb2a625b 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -44,20 +44,12 @@ ssh_pwauth: 0 # This will cause the set+update hostname module to not operate (if true) preserve_hostname: false +# If you use datasource_list array, keep array items in a single line. +# If you use multi line array, ds-identify script won't read array items. {% if variant.endswith("bsd") %} # This should not be required, but leave it in place until the real cause of # not finding -any- datasources is resolved. 
datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] -{% elif variant in ["photon"] %} -# Datasources to check for cloud-config -datasource_list: [ - NoCloud, - ConfigDrive, - OVF, - OpenStack, - VMwareGuestInfo, - None - ] {% endif %} # Example datasource config # datasource: diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 562ee04a..d09e46af 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -15,6 +15,7 @@ from cloudinit.tests.helpers import ( FilesystemMockingTestCase, dir2dict) from cloudinit import subp from cloudinit import util +from cloudinit import safeyaml BASE_NET_CFG = ''' auto lo @@ -88,6 +89,24 @@ V1_NET_CFG = {'config': [{'name': 'eth0', 'type': 'physical'}], 'version': 1} +V1_NET_CFG_WITH_DUPS = """\ +# same value in interface specific dns and global dns +# should produce single entry in network file +version: 1 +config: + - type: physical + name: eth0 + subnets: + - type: static + address: 192.168.0.102/24 + dns_nameservers: [1.2.3.4] + dns_search: [test.com] + interface: eth0 + - type: nameserver + address: [1.2.3.4] + search: [test.com] +""" + V1_NET_CFG_OUTPUT = """\ # This file is generated from information provided by the datasource. Changes # to it will not persist across an instance reboot. To disable cloud-init's @@ -867,6 +886,28 @@ class TestNetCfgDistroPhoton(TestNetCfgDistroBase): V2_NET_CFG, expected_cfgs.copy()) + def test_photon_network_config_v1_with_duplicates(self): + expected = """\ + [Match] + Name=eth0 + [Network] + DHCP=no + DNS=1.2.3.4 + Domains=test.com + [Address] + Address=192.168.0.102/24""" + + net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) + + expected = self.create_conf_dict(expected.splitlines()) + expected_cfgs = { + self.nwk_file_path('eth0'): expected, + } + + self._apply_and_verify(self.distro.apply_network_config, + net_cfg, + expected_cfgs.copy()) + def get_mode(path, target=None): return os.stat(subp.target_path(target, path)).st_mode & 0o777 diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index b2ddbf99..1aab51ee 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -825,10 +825,14 @@ NETWORK_CONFIGS = { [Match] Name=eth99 MACAddress=c0:d6:9f:2c:e8:80 + [Address] + Address=192.168.21.3/24 [Network] DHCP=ipv4 + Domains=barley.maas sach.maas Domains=wark.maas DNS=1.2.3.4 5.6.7.8 + DNS=8.8.8.8 8.8.4.4 [Route] Gateway=65.61.151.37 Destination=0.0.0.0/0 -- cgit v1.2.3 From 78e89b03ecb29e7df3181b1219a0b5f44b9d7532 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Thu, 1 Jul 2021 12:35:40 -0400 Subject: - Detect a Python version change and clear the cache (#857) summary: Clear cache when a Python version change is detected When a distribution gets updated, it is possible that the Python version changes. Python makes no guarantee that pickle is consistent across versions; as such, we need to purge the cache and start over.
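The mechanics amount to comparing a stored "major.minor" marker against the running interpreter. A minimal sketch, assuming the marker path used by the integration tests below and eliding the missing-file handling:

    import sys

    current = '%d.%d' % (sys.version_info.major, sys.version_info.minor)
    with open('/var/lib/cloud/data/python-version') as f:
        if f.read() != current:
            # anything pickled by the old interpreter may be unreadable now
            print('Python version change detected. Purging cache')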
Co-authored-by: James Falcon --- cloudinit/cmd/main.py | 30 +++++++++++ cloudinit/cmd/tests/test_main.py | 2 + .../assets/test_version_change.pkl | Bin 0 -> 21 bytes .../modules/test_ssh_auth_key_fingerprints.py | 2 +- .../modules/test_version_change.py | 56 +++++++++++++++++++++ tests/integration_tests/util.py | 4 ++ 6 files changed, 93 insertions(+), 1 deletion(-) create mode 100644 tests/integration_tests/assets/test_version_change.pkl create mode 100644 tests/integration_tests/modules/test_version_change.py (limited to 'cloudinit') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index baf1381f..21213a4a 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -210,6 +210,35 @@ def attempt_cmdline_url(path, network=True, cmdline=None): (cmdline_name, url, path)) +def purge_cache_on_python_version_change(init): + """Purge the cache if python version changed on us. + + There could be changes not represented in our cache (obj.pkl) after we + upgrade to a new version of python, so at that point clear the cache + """ + current_python_version = '%d.%d' % ( + sys.version_info.major, sys.version_info.minor + ) + python_version_path = os.path.join( + init.paths.get_cpath('data'), 'python-version' + ) + if os.path.exists(python_version_path): + cached_python_version = open(python_version_path).read() + # The Python version has changed out from under us, anything that was + # pickled previously is likely useless due to API changes. + if cached_python_version != current_python_version: + LOG.debug('Python version change detected. Purging cache') + init.purge_cache(True) + util.write_file(python_version_path, current_python_version) + else: + if os.path.exists(init.paths.get_ipath_cur('obj_pkl')): + LOG.info( + 'Writing python-version file. ' + 'Cache compatibility status is currently unknown.' 
+ ) + util.write_file(python_version_path, current_python_version) + + def main_init(name, args): deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] if args.local: @@ -276,6 +305,7 @@ def main_init(name, args): util.logexc(LOG, "Failed to initialize, likely bad things to come!") # Stage 4 path_helper = init.paths + purge_cache_on_python_version_change(init) mode = sources.DSMODE_LOCAL if args.local else sources.DSMODE_NETWORK if mode == sources.DSMODE_NETWORK: diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index 78b27441..1f5975b0 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -17,6 +17,8 @@ myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') class TestMain(FilesystemMockingTestCase): + with_logs = True + allowed_subp = False def setUp(self): super(TestMain, self).setUp() diff --git a/tests/integration_tests/assets/test_version_change.pkl b/tests/integration_tests/assets/test_version_change.pkl new file mode 100644 index 00000000..65ae93e5 Binary files /dev/null and b/tests/integration_tests/assets/test_version_change.pkl differ diff --git a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py index b9b0d85e..e1946cb1 100644 --- a/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py +++ b/tests/integration_tests/modules/test_ssh_auth_key_fingerprints.py @@ -18,7 +18,7 @@ USER_DATA_SSH_AUTHKEY_DISABLE = """\ no_ssh_fingerprints: true """ -USER_DATA_SSH_AUTHKEY_ENABLE="""\ +USER_DATA_SSH_AUTHKEY_ENABLE = """\ #cloud-config ssh_genkeytypes: - ecdsa diff --git a/tests/integration_tests/modules/test_version_change.py b/tests/integration_tests/modules/test_version_change.py new file mode 100644 index 00000000..4e9ab63f --- /dev/null +++ b/tests/integration_tests/modules/test_version_change.py @@ -0,0 +1,56 @@ +from pathlib import Path + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import ASSETS_DIR + + +PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') +TEST_PICKLE = ASSETS_DIR / 'test_version_change.pkl' + + +def _assert_no_pickle_problems(log): + assert 'Failed loading pickled blob' not in log + assert 'Traceback' not in log + assert 'WARN' not in log + + +def test_reboot_without_version_change(client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Python version change detected' not in log + assert 'Cache compatibility status is currently unknown.' not in log + _assert_no_pickle_problems(log) + + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Python version change detected' not in log + assert 'Could not determine Python version used to write cache' not in log + _assert_no_pickle_problems(log) + + # Now ensure that loading a bad pickle gives us problems + client.push_file(TEST_PICKLE, PICKLE_PATH) + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Failed loading pickled blob from {}'.format(PICKLE_PATH) in log + + +def test_cache_purged_on_version_change(client: IntegrationInstance): + # Start by pushing the invalid pickle so we'll hit an error if the + # cache didn't actually get purged + client.push_file(TEST_PICKLE, PICKLE_PATH) + client.execute("echo '1.0' > /var/lib/cloud/data/python-version") + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Python version change detected. 
Purging cache' in log + _assert_no_pickle_problems(log) + + +def test_log_message_on_missing_version_file(client: IntegrationInstance): + # Start by pushing a pickle so we can see the log message + client.push_file(TEST_PICKLE, PICKLE_PATH) + client.execute("rm /var/lib/cloud/data/python-version") + client.restart() + log = client.read_from_file('/var/log/cloud-init.log') + assert ( + 'Writing python-version file. ' + 'Cache compatibility status is currently unknown.' + ) in log diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 3ef12358..8d726bb2 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -3,10 +3,14 @@ import multiprocessing import os import time from contextlib import contextmanager +from pathlib import Path log = logging.getLogger('integration_testing') +ASSETS_DIR = Path('tests/integration_tests/assets') + + def verify_ordered_items_in_text(to_verify: list, text: str): """Assert all items in list appear in order in text. -- cgit v1.2.3 From 81299de5fe3b6e491a965a6ebef66c6b8bf2c037 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 1 Jul 2021 14:43:07 -0500 Subject: Add new network activators to bring up interfaces (#919) Currently _bring_up_interfaces() is a no-op for any distro using renderers. We need to be able to support bringing up a single interface, a list of interfaces, and all interfaces. This should be independent of the renderers, as the network config is often generated independently of the mechanism used to apply it. Additionally, I included a refactor to remove "_supported_write_network_config". We had a confusing call chain of apply_network_config->_write_network_config->_supported_write_network_config. The last two have been combined. --- cloudinit/cmd/devel/net_convert.py | 3 - cloudinit/distros/__init__.py | 39 +++---- cloudinit/distros/alpine.py | 13 --- cloudinit/distros/arch.py | 10 +- cloudinit/distros/bsd.py | 3 - cloudinit/distros/debian.py | 14 +-- cloudinit/distros/opensuse.py | 9 -- cloudinit/distros/photon.py | 3 - cloudinit/distros/rhel.py | 9 -- cloudinit/net/activators.py | 156 ++++++++++++++++++++++++++ cloudinit/net/netplan.py | 10 +- cloudinit/net/network_state.py | 59 +++++----- cloudinit/net/renderer.py | 2 + cloudinit/net/renderers.py | 13 ++- cloudinit/net/sysconfig.py | 11 +- cloudinit/net/tests/test_network_state.py | 6 +- tests/unittests/test_net.py | 2 +- tests/unittests/test_net_activators.py | 177 ++++++++++++++++++++++++++++++ 18 files changed, 413 insertions(+), 126 deletions(-) create mode 100644 cloudinit/net/activators.py create mode 100644 tests/unittests/test_net_activators.py (limited to 'cloudinit') diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index 5c649fd0..f4a98e5e 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -96,9 +96,6 @@ def handle_args(name, args): pre_ns = ovf.get_network_config_from_conf(config, False) ns = network_state.parse_net_config_data(pre_ns) - if not ns: - raise RuntimeError("No valid network_state object created from" - " input data") if args.debug: sys.stderr.write('\n'.join( diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 4991f42b..2caa8bc2 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -16,13 +16,16 @@ import stat import string import urllib.parse from io import StringIO +from typing import Any, Mapping from cloudinit import importer from cloudinit import log as logging from 
cloudinit import net +from cloudinit.net import activators from cloudinit.net import eni from cloudinit.net import network_state from cloudinit.net import renderers +from cloudinit.net.network_state import parse_net_config_data from cloudinit import persistence from cloudinit import ssh_util from cloudinit import type_utils @@ -72,7 +75,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): hostname_conf_fn = "/etc/hostname" tz_zone_dir = "/usr/share/zoneinfo" init_cmd = ['service'] # systemctl, service etc - renderer_configs = {} + renderer_configs = {} # type: Mapping[str, Mapping[str, Any]] _preferred_ntp_clients = None networking_cls = LinuxNetworking # This is used by self.shutdown_command(), and can be overridden in @@ -106,14 +109,12 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): raise NotImplementedError() def _write_network(self, settings): - raise RuntimeError( + """Deprecated. Remove if/when arch and gentoo support renderers.""" + raise NotImplementedError( "Legacy function '_write_network' was called in distro '%s'.\n" "_write_network_config needs implementation.\n" % self.name) - def _write_network_config(self, settings): - raise NotImplementedError() - - def _supported_write_network_config(self, network_config): + def _write_network_state(self, network_state): priority = util.get_cfg_by_path( self._cfg, ('network', 'renderers'), None) @@ -121,8 +122,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): LOG.debug("Selected renderer '%s' from priority list: %s", name, priority) renderer = render_cls(config=self.renderer_configs.get(name)) - renderer.render_network_config(network_config) - return [] + renderer.render_network_state(network_state) def _find_tz_file(self, tz): tz_file = os.path.join(self.tz_zone_dir, str(tz)) @@ -174,6 +174,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): mirror_info=arch_info) def apply_network(self, settings, bring_up=True): + """Deprecated. Remove if/when arch and gentoo support renderers.""" # this applies network where 'settings' is interfaces(5) style # it is obsolete compared to apply_network_config # Write it out @@ -188,6 +189,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): return False def _apply_network_from_network_config(self, netconfig, bring_up=True): + """Deprecated. Remove if/when arch and gentoo support renderers.""" distro = self.__class__ LOG.warning("apply_network_config is not currently implemented " "for distribution '%s'. Attempting to use apply_network", @@ -208,8 +210,9 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): # apply network config netconfig # This method is preferred to apply_network which only takes # a much less complete network config format (interfaces(5)). 
+ network_state = parse_net_config_data(netconfig) try: - dev_names = self._write_network_config(netconfig) + self._write_network_state(network_state) except NotImplementedError: # backwards compat until all distros have apply_network_config return self._apply_network_from_network_config( @@ -217,7 +220,8 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): # Now try to bring them up if bring_up: - return self._bring_up_interfaces(dev_names) + network_activator = activators.select_activator() + network_activator.bring_up_all_interfaces(network_state) return False def apply_network_config_names(self, netconfig): @@ -393,20 +397,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): return self._preferred_ntp_clients def _bring_up_interface(self, device_name): - cmd = ['ifup', device_name] - LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) - try: - (_out, err) = subp.subp(cmd) - if len(err): - LOG.warning("Running %s resulted in stderr output: %s", - cmd, err) - return True - except subp.ProcessExecutionError: - util.logexc(LOG, "Running interface command %s failed", cmd) - return False + """Deprecated. Remove if/when arch and gentoo support renderers.""" + raise NotImplementedError def _bring_up_interfaces(self, device_names): + """Deprecated. Remove if/when arch and gentoo support renderers.""" am_failed = 0 for d in device_names: if not self._bring_up_interface(d): diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py index ca5bfe80..e4bed5a2 100644 --- a/cloudinit/distros/alpine.py +++ b/cloudinit/distros/alpine.py @@ -73,19 +73,6 @@ class Distro(distros.Distro): self.update_package_sources() self.package_command('add', pkgs=pkglist) - def _write_network_config(self, netconfig): - return self._supported_write_network_config(netconfig) - - def _bring_up_interfaces(self, device_names): - use_all = False - for d in device_names: - if d == 'all': - use_all = True - if use_all: - return distros.Distro._bring_up_interface(self, '-a') - else: - return distros.Distro._bring_up_interfaces(self, device_names) - def _write_hostname(self, your_hostname, out_fn): conf = None try: diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 246e6fe7..c9acb11f 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -61,9 +61,9 @@ class Distro(distros.Distro): self.update_package_sources() self.package_command('', pkgs=pkglist) - def _write_network_config(self, netconfig): + def _write_network_state(self, network_state): try: - return self._supported_write_network_config(netconfig) + super()._write_network_state(network_state) except RendererNotFoundError as e: # Fall back to old _write_network raise NotImplementedError from e @@ -101,12 +101,6 @@ class Distro(distros.Distro): util.logexc(LOG, "Running interface command %s failed", cmd) return False - def _bring_up_interfaces(self, device_names): - for d in device_names: - if not self._bring_up_interface(d): - return False - return True - def _write_hostname(self, your_hostname, out_fn): conf = None try: diff --git a/cloudinit/distros/bsd.py b/cloudinit/distros/bsd.py index f717a667..c2fc1e0b 100644 --- a/cloudinit/distros/bsd.py +++ b/cloudinit/distros/bsd.py @@ -120,9 +120,6 @@ class BSD(distros.Distro): # Allow the output of this to flow outwards (ie not be captured) subp.subp(cmd, env=self._get_pkg_cmd_environ(), capture=False) - def _write_network_config(self, netconfig): - return 
self._supported_write_network_config(netconfig) - def set_timezone(self, tz): distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 844aaf21..089e0c3e 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -111,19 +111,9 @@ class Distro(distros.Distro): self.update_package_sources() self.package_command('install', pkgs=pkglist) - def _write_network_config(self, netconfig): + def _write_network_state(self, network_state): _maybe_remove_legacy_eth0() - return self._supported_write_network_config(netconfig) - - def _bring_up_interfaces(self, device_names): - use_all = False - for d in device_names: - if d == 'all': - use_all = True - if use_all: - return distros.Distro._bring_up_interface(self, '--all') - else: - return distros.Distro._bring_up_interfaces(self, device_names) + return super()._write_network_state(network_state) def _write_hostname(self, your_hostname, out_fn): conf = None diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 270cc189..b4193ac2 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -116,12 +116,6 @@ class Distro(distros.Distro): self._runner.run("update-sources", self.package_command, ['refresh'], freq=PER_INSTANCE) - def _bring_up_interfaces(self, device_names): - if device_names and 'all' in device_names: - raise RuntimeError(('Distro %s can not translate ' - 'the device name "all"') % (self.name)) - return distros.Distro._bring_up_interfaces(self, device_names) - def _read_hostname(self, filename, default=None): if self.uses_systemd() and filename.endswith('/previous-hostname'): return util.load_file(filename).strip() @@ -174,9 +168,6 @@ class Distro(distros.Distro): conf.set_hostname(hostname) util.write_file(out_fn, str(conf), 0o644) - def _write_network_config(self, netconfig): - return self._supported_write_network_config(netconfig) - @property def preferred_ntp_clients(self): """The preferred ntp client is dependent on the version.""" diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py index 45125be7..0ced7b5f 100644 --- a/cloudinit/distros/photon.py +++ b/cloudinit/distros/photon.py @@ -76,9 +76,6 @@ class Distro(distros.Distro): # self.update_package_sources() self.package_command('install', pkgs=pkglist) - def _write_network_config(self, netconfig): - return self._supported_write_network_config(netconfig) - def _bring_up_interfaces(self, device_names): cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'] LOG.debug('Attempting to run bring up interfaces using command %s', diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 80a6f1d8..be5b3d24 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -65,9 +65,6 @@ class Distro(distros.Distro): def install_packages(self, pkglist): self.package_command('install', pkgs=pkglist) - def _write_network_config(self, netconfig): - return self._supported_write_network_config(netconfig) - def apply_locale(self, locale, out_fn=None): if self.uses_systemd(): if not out_fn: @@ -117,12 +114,6 @@ class Distro(distros.Distro): else: return default - def _bring_up_interfaces(self, device_names): - if device_names and 'all' in device_names: - raise RuntimeError(('Distro %s can not translate ' - 'the device name "all"') % (self.name)) - return distros.Distro._bring_up_interfaces(self, device_names) - def set_timezone(self, tz): tz_file = self._find_tz_file(tz) if self.uses_systemd(): diff 
--git a/cloudinit/net/activators.py b/cloudinit/net/activators.py new file mode 100644 index 00000000..34fee3bf --- /dev/null +++ b/cloudinit/net/activators.py @@ -0,0 +1,156 @@ +# This file is part of cloud-init. See LICENSE file for license information. +import logging +import os +from abc import ABC, abstractmethod +from typing import Iterable, List, Type + +from cloudinit import subp +from cloudinit import util +from cloudinit.net.eni import available as eni_available +from cloudinit.net.netplan import available as netplan_available +from cloudinit.net.network_state import NetworkState +from cloudinit.net.sysconfig import NM_CFG_FILE + + +LOG = logging.getLogger(__name__) + + +class NetworkActivator(ABC): + @staticmethod + @abstractmethod + def available() -> bool: + raise NotImplementedError() + + @staticmethod + @abstractmethod + def bring_up_interface(device_name: str) -> bool: + raise NotImplementedError() + + @classmethod + def bring_up_interfaces(cls, device_names: Iterable[str]) -> bool: + all_succeeded = True + for device in device_names: + if not cls.bring_up_interface(device): + all_succeeded = False + return all_succeeded + + @classmethod + def bring_up_all_interfaces(cls, network_state: NetworkState) -> bool: + return cls.bring_up_interfaces( + [i['name'] for i in network_state.iter_interfaces()] + ) + + +class IfUpDownActivator(NetworkActivator): + # Note that we're not overriding bring_up_interfaces to pass something + # like ifup --all because it isn't supported everywhere. + # E.g., NetworkManager has a ifupdown plugin that requires the name + # of a specific connection. + @staticmethod + def available(target=None) -> bool: + """Return true if ifupdown can be used on this system.""" + return eni_available(target=target) + + @staticmethod + def bring_up_interface(device_name: str) -> bool: + """Bring up interface using ifup.""" + cmd = ['ifup', device_name] + LOG.debug("Attempting to run bring up interface %s using command %s", + device_name, cmd) + try: + (_out, err) = subp.subp(cmd) + if len(err): + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) + return True + except subp.ProcessExecutionError: + util.logexc(LOG, "Running interface command %s failed", cmd) + return False + + +class NetworkManagerActivator(NetworkActivator): + @staticmethod + def available(target=None) -> bool: + config_present = os.path.isfile( + subp.target_path(target, path=NM_CFG_FILE) + ) + nmcli_present = subp.which('nmcli', target=target) + return config_present and bool(nmcli_present) + + @staticmethod + def bring_up_interface(device_name: str) -> bool: + try: + subp.subp(['nmcli', 'connection', 'up', device_name]) + except subp.ProcessExecutionError: + util.logexc(LOG, "nmcli failed to bring up {}".format(device_name)) + return False + return True + + +class NetplanActivator(NetworkActivator): + @staticmethod + def available(target=None) -> bool: + return netplan_available(target=target) + + @staticmethod + def _apply_netplan(): + LOG.debug('Applying current netplan config') + try: + subp.subp(['netplan', 'apply'], capture=True) + except subp.ProcessExecutionError: + util.logexc(LOG, "netplan apply failed") + return False + return True + + @staticmethod + def bring_up_interface(device_name: str) -> bool: + LOG.debug("Calling 'netplan apply' rather than " + "bringing up individual interfaces") + return NetplanActivator._apply_netplan() + + @staticmethod + def bring_up_interfaces(device_names: Iterable[str]) -> bool: + LOG.debug("Calling 'netplan apply' rather than " + 
"bringing up individual interfaces") + return NetplanActivator._apply_netplan() + + @staticmethod + def bring_up_all_interfaces(network_state: NetworkState) -> bool: + return NetplanActivator._apply_netplan() + + +# This section is mostly copied and pasted from renderers.py. An abstract +# version to encompass both seems overkill at this point +DEFAULT_PRIORITY = [ + IfUpDownActivator, + NetworkManagerActivator, + NetplanActivator, +] + + +def search_activator( + priority=None, target=None +) -> List[Type[NetworkActivator]]: + if priority is None: + priority = DEFAULT_PRIORITY + + unknown = [i for i in priority if i not in DEFAULT_PRIORITY] + if unknown: + raise ValueError( + "Unknown activators provided in priority list: %s" % unknown) + + return [activator for activator in priority if activator.available(target)] + + +def select_activator(priority=None, target=None) -> Type[NetworkActivator]: + found = search_activator(priority, target) + if not found: + if priority is None: + priority = DEFAULT_PRIORITY + tmsg = "" + if target and target != "/": + tmsg = " in target=%s" % target + raise RuntimeError( + "No available network activators found%s. Searched " + "through list: %s" % (tmsg, priority)) + return found[0] diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py index 53347c83..41acf963 100644 --- a/cloudinit/net/netplan.py +++ b/cloudinit/net/netplan.py @@ -4,7 +4,12 @@ import copy import os from . import renderer -from .network_state import subnet_is_ipv6, NET_CONFIG_TO_V2, IPV6_DYNAMIC_TYPES +from .network_state import ( + NetworkState, + subnet_is_ipv6, + NET_CONFIG_TO_V2, + IPV6_DYNAMIC_TYPES, +) from cloudinit import log as logging from cloudinit import util @@ -256,7 +261,7 @@ class Renderer(renderer.Renderer): os.path.islink(SYS_CLASS_NET + iface)]: subp.subp(cmd, capture=True) - def _render_content(self, network_state): + def _render_content(self, network_state: NetworkState): # if content already in netplan format, pass it back if network_state.version == 2: @@ -426,4 +431,5 @@ def network_state_to_netplan(network_state, header=None): contents = renderer._render_content(network_state) return header + contents + # vi: ts=4 expandtab diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 8018cfb9..95b064f0 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -58,38 +58,6 @@ NET_CONFIG_TO_V2 = { 'bridge_waitport': None}} -def parse_net_config_data(net_config, skip_broken=True): - """Parses the config, returns NetworkState object - - :param net_config: curtin network config dict - """ - state = None - version = net_config.get('version') - config = net_config.get('config') - if version == 2: - # v2 does not have explicit 'config' key so we - # pass the whole net-config as-is - config = net_config - - if version and config is not None: - nsi = NetworkStateInterpreter(version=version, config=config) - nsi.parse_config(skip_broken=skip_broken) - state = nsi.get_network_state() - - return state - - -def parse_net_config(path, skip_broken=True): - """Parses a curtin network configuration file and - return network state""" - ns = None - net_config = util.read_conf(path) - if 'network' in net_config: - ns = parse_net_config_data(net_config.get('network'), - skip_broken=skip_broken) - return ns - - def from_state_file(state_file): state = util.read_conf(state_file) nsi = NetworkStateInterpreter() @@ -1088,4 +1056,31 @@ def mask_and_ipv4_to_bcast_addr(mask, ip): return bcast_str +def parse_net_config_data(net_config, 
skip_broken=True) -> NetworkState: + """Parses the config, returns NetworkState object + + :param net_config: curtin network config dict + """ + state = None + version = net_config.get('version') + config = net_config.get('config') + if version == 2: + # v2 does not have explicit 'config' key so we + # pass the whole net-config as-is + config = net_config + + if version and config is not None: + nsi = NetworkStateInterpreter(version=version, config=config) + nsi.parse_config(skip_broken=skip_broken) + state = nsi.get_network_state() + + if not state: + raise RuntimeError( + "No valid network_state object created from network config. " + "Did you specify the correct version?" + ) + + return state + + # vi: ts=4 expandtab diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py index 2a61a7a8..27447bc2 100644 --- a/cloudinit/net/renderer.py +++ b/cloudinit/net/renderer.py @@ -28,6 +28,8 @@ filter_by_physical = filter_by_type('physical') class Renderer(object): + def __init__(self, config=None): + pass @staticmethod def _render_persistent_net(network_state): diff --git a/cloudinit/net/renderers.py b/cloudinit/net/renderers.py index c3931a98..822b45de 100644 --- a/cloudinit/net/renderers.py +++ b/cloudinit/net/renderers.py @@ -1,10 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. +from typing import List, Tuple, Type + from . import eni from . import freebsd from . import netbsd from . import netplan from . import networkd +from . import renderer from . import RendererNotFoundError from . import openbsd from . import sysconfig @@ -23,7 +26,9 @@ DEFAULT_PRIORITY = ["eni", "sysconfig", "netplan", "freebsd", "netbsd", "openbsd", "networkd"] -def search(priority=None, target=None, first=False): +def search( + priority=None, target=None, first=False +) -> List[Tuple[str, Type[renderer.Renderer]]]: if priority is None: priority = DEFAULT_PRIORITY @@ -40,13 +45,13 @@ def search(priority=None, target=None, first=False): if render_mod.available(target): cur = (name, render_mod.Renderer) if first: - return cur + return [cur] found.append(cur) return found -def select(priority=None, target=None): +def select(priority=None, target=None) -> Tuple[str, Type[renderer.Renderer]]: found = search(priority, target=target, first=True) if not found: if priority is None: @@ -57,6 +62,6 @@ def select(priority=None, target=None): raise RendererNotFoundError( "No available network renderers found%s. 
Searched " "through list: %s" % (tmsg, priority)) - return found + return found[0] # vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 3a433c99..8031cd3a 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -18,8 +18,8 @@ from .network_state import ( is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) LOG = logging.getLogger(__name__) -NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky', 'suse'] +NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" def _make_header(sep='#'): @@ -931,7 +931,9 @@ class Renderer(renderer.Renderer): netrules_path = subp.target_path(target, self.netrules_path) util.write_file(netrules_path, netrules_content, file_mode) if available_nm(target=target): - enable_ifcfg_rh(subp.target_path(target, path=NM_CFG_FILE)) + enable_ifcfg_rh(subp.target_path( + target, path=NM_CFG_FILE + )) sysconfig_path = subp.target_path(target, templates.get('control')) # Distros configuring /etc/sysconfig/network as a file e.g. Centos @@ -978,7 +980,10 @@ def available_sysconfig(target=None): def available_nm(target=None): - if not os.path.isfile(subp.target_path(target, path=NM_CFG_FILE)): + if not os.path.isfile(subp.target_path( + target, + path=NM_CFG_FILE + )): return False return True diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py index fc4724a1..84e8308a 100644 --- a/cloudinit/net/tests/test_network_state.py +++ b/cloudinit/net/tests/test_network_state.py @@ -67,11 +67,13 @@ class TestNetworkStateParseConfig(CiTestCase): def test_missing_version_returns_none(self): ncfg = {} - self.assertEqual(None, network_state.parse_net_config_data(ncfg)) + with self.assertRaises(RuntimeError): + network_state.parse_net_config_data(ncfg) def test_unknown_versions_returns_none(self): ncfg = {'version': 13.2} - self.assertEqual(None, network_state.parse_net_config_data(ncfg)) + with self.assertRaises(RuntimeError): + network_state.parse_net_config_data(ncfg) def test_version_2_passes_self_as_config(self): ncfg = {'version': 2, 'otherconfig': {}, 'somemore': [1, 2, 3]} diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 1aab51ee..43e209c1 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -5277,7 +5277,7 @@ class TestNetRenderers(CiTestCase): # available should only be called until one is found. 
m_eni_avail.return_value = True m_sysc_avail.side_effect = Exception("Should not call me") - found = renderers.search(priority=['eni', 'sysconfig'], first=True) + found = renderers.search(priority=['eni', 'sysconfig'], first=True)[0] self.assertEqual(['eni'], [found[0]]) @mock.patch("cloudinit.net.renderers.sysconfig.available") diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py new file mode 100644 index 00000000..f11486ff --- /dev/null +++ b/tests/unittests/test_net_activators.py @@ -0,0 +1,177 @@ +from collections import namedtuple +from unittest.mock import patch + +import pytest + +from cloudinit.net.activators import ( + DEFAULT_PRIORITY, + search_activator, + select_activator, +) +from cloudinit.net.activators import ( + IfUpDownActivator, + NetplanActivator, + NetworkManagerActivator +) +from cloudinit.net.network_state import parse_net_config_data +from cloudinit.safeyaml import load + + +V1_CONFIG = """\ +version: 1 +config: +- type: physical + name: eth0 +- type: physical + name: eth1 +""" + +V2_CONFIG = """\ +version: 2 +ethernets: + eth0: + dhcp4: true + eth1: + dhcp4: true +""" + +IF_UP_DOWN_AVAILABLE_CALLS = [ + (('ifquery',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), + (('ifup',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), + (('ifdown',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), +] + +IF_UP_DOWN_CALL_LIST = [ + ((['ifup', 'eth0'], ), {}), + ((['ifup', 'eth1'], ), {}), +] + +NETPLAN_AVAILABLE_CALLS = [ + (('netplan',), {'search': ['/usr/sbin', '/sbin'], 'target': None}), +] + +NETPLAN_CALL_LIST = [ + ((['netplan', 'apply'], ), {'capture': True}), +] + +NETWORK_MANAGER_AVAILABLE_CALLS = [ + (('nmcli',), {'target': None}), +] + +NETWORK_MANAGER_CALL_LIST = [ + ((['nmcli', 'connection', 'up', 'eth0'], ), {}), + ((['nmcli', 'connection', 'up', 'eth1'], ), {}), +] + + +@pytest.yield_fixture +def available_mocks(): + mocks = namedtuple('Mocks', 'm_which, m_file') + with patch('cloudinit.subp.which', return_value=True) as m_which: + with patch('os.path.isfile', return_value=True) as m_file: + yield mocks(m_which, m_file) + + +@pytest.yield_fixture +def unavailable_mocks(): + mocks = namedtuple('Mocks', 'm_which, m_file') + with patch('cloudinit.subp.which', return_value=False) as m_which: + with patch('os.path.isfile', return_value=False) as m_file: + yield mocks(m_which, m_file) + + +class TestSearchAndSelect: + def test_defaults(self, available_mocks): + resp = search_activator() + assert resp == DEFAULT_PRIORITY + + activator = select_activator() + assert activator == DEFAULT_PRIORITY[0] + + def test_priority(self, available_mocks): + new_order = [NetplanActivator, NetworkManagerActivator] + resp = search_activator(priority=new_order) + assert resp == new_order + + activator = select_activator(priority=new_order) + assert activator == new_order[0] + + def test_target(self, available_mocks): + search_activator(target='/tmp') + assert '/tmp' == available_mocks.m_which.call_args[1]['target'] + + select_activator(target='/tmp') + assert '/tmp' == available_mocks.m_which.call_args[1]['target'] + + @patch('cloudinit.net.activators.IfUpDownActivator.available', + return_value=False) + def test_first_not_available(self, m_available, available_mocks): + resp = search_activator() + assert resp == DEFAULT_PRIORITY[1:] + + resp = select_activator() + assert resp == DEFAULT_PRIORITY[1] + + def test_priority_not_exist(self, available_mocks): + with pytest.raises(ValueError): + search_activator(priority=['spam', 
'eggs']) + with pytest.raises(ValueError): + select_activator(priority=['spam', 'eggs']) + + def test_none_available(self, unavailable_mocks): + resp = search_activator() + assert resp == [] + + with pytest.raises(RuntimeError): + select_activator() + + +@pytest.mark.parametrize('activator, available_calls, expected_call_list', [ + (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS, IF_UP_DOWN_CALL_LIST), + (NetplanActivator, NETPLAN_AVAILABLE_CALLS, NETPLAN_CALL_LIST), + (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS, + NETWORK_MANAGER_CALL_LIST), +]) +class TestIfUpDownActivator: + def test_available( + self, activator, available_calls, expected_call_list, available_mocks + ): + activator.available() + assert available_mocks.m_which.call_args_list == available_calls + + @patch('cloudinit.subp.subp', return_value=('', '')) + def test_bring_up_interface( + self, m_subp, activator, available_calls, expected_call_list, + available_mocks + ): + activator.bring_up_interface('eth0') + assert len(m_subp.call_args_list) == 1 + assert m_subp.call_args_list[0] == expected_call_list[0] + + @patch('cloudinit.subp.subp', return_value=('', '')) + def test_bring_up_interfaces( + self, m_subp, activator, available_calls, expected_call_list, + available_mocks + ): + activator.bring_up_interfaces(['eth0', 'eth1']) + assert expected_call_list == m_subp.call_args_list + + @patch('cloudinit.subp.subp', return_value=('', '')) + def test_bring_up_all_interfaces_v1( + self, m_subp, activator, available_calls, expected_call_list, + available_mocks + ): + network_state = parse_net_config_data(load(V1_CONFIG)) + activator.bring_up_all_interfaces(network_state) + for call in m_subp.call_args_list: + assert call in expected_call_list + + @patch('cloudinit.subp.subp', return_value=('', '')) + def test_bring_up_all_interfaces_v2( + self, m_subp, activator, available_calls, expected_call_list, + available_mocks + ): + network_state = parse_net_config_data(load(V2_CONFIG)) + activator.bring_up_all_interfaces(network_state) + for call in m_subp.call_args_list: + assert call in expected_call_list -- cgit v1.2.3 From db51b656ee997382c3c4792271fa08398f43e105 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Thu, 1 Jul 2021 18:12:31 -0400 Subject: freebsd/net_v1 format: read MTU from root (#930) We currently read the MTU only from the subnet entries, but with the v1 format the MTU can also be set at the root level of the interface entry in the `config` section. Limitation: we won't set the MTU if the interface uses DHCP; supporting that would require a bit of refactoring. Also simplify/clarify how we pass the target variable in `cloudinit.net.bsd`.
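The fallback itself is a one-line change in cloudinit/net/bsd.py: the subnet's mtu wins, and the interface's root-level value is used otherwise. A sketch with the sample values from the new round-trip test (eno1, root-level mtu 1470):

    subnet = {'type': 'static', 'address': '172.20.80.129/25'}  # no subnet mtu
    interface = {'name': 'eno1', 'mtu': 1470}  # mtu at the root of the entry
    mtu = subnet.get('mtu') or interface.get('mtu')  # -> 1470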
See: https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=256309 Reported-by: Andrey Fesenko --- cloudinit/net/bsd.py | 16 +++++----- tests/unittests/test_net_freebsd.py | 63 +++++++++++++++++++++++++++++++++++-- 2 files changed, 69 insertions(+), 10 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/bsd.py b/cloudinit/net/bsd.py index aab968a8..916cea32 100644 --- a/cloudinit/net/bsd.py +++ b/cloudinit/net/bsd.py @@ -33,7 +33,7 @@ class BSDRenderer(renderer.Renderer): self.interface_configurations = {} self._postcmds = config.get('postcmds', True) - def _ifconfig_entries(self, settings, target=None): + def _ifconfig_entries(self, settings): ifname_by_mac = net.get_interfaces_by_mac() for interface in settings.iter_interfaces(): device_name = interface.get("name") @@ -76,10 +76,10 @@ class BSDRenderer(renderer.Renderer): self.interface_configurations[device_name] = { 'address': subnet.get('address'), 'netmask': subnet.get('netmask'), - 'mtu': subnet.get('mtu'), + 'mtu': subnet.get('mtu') or interface.get('mtu'), } - def _route_entries(self, settings, target=None): + def _route_entries(self, settings): routes = list(settings.iter_routes()) for interface in settings.iter_interfaces(): subnets = interface.get("subnets", []) @@ -102,7 +102,7 @@ class BSDRenderer(renderer.Renderer): gateway = route.get('gateway') self.set_route(network, netmask, gateway) - def _resolve_conf(self, settings, target=None): + def _resolve_conf(self, settings): nameservers = settings.dns_nameservers searchdomains = settings.dns_searchdomains for interface in settings.iter_interfaces(): @@ -115,11 +115,11 @@ class BSDRenderer(renderer.Renderer): # fails. try: resolvconf = ResolvConf(util.load_file(subp.target_path( - target, self.resolv_conf_fn))) + self.target, self.resolv_conf_fn))) resolvconf.parse() except IOError: util.logexc(LOG, "Failed to parse %s, use new empty file", - subp.target_path(target, self.resolv_conf_fn)) + subp.target_path(self.target, self.resolv_conf_fn)) resolvconf = ResolvConf('') resolvconf.parse() @@ -137,10 +137,12 @@ class BSDRenderer(renderer.Renderer): except ValueError: util.logexc(LOG, "Failed to add search domain %s", domain) util.write_file( - subp.target_path(target, self.resolv_conf_fn), + subp.target_path(self.target, self.resolv_conf_fn), str(resolvconf), 0o644) def render_network_state(self, network_state, templates=None, target=None): + if target: + self.target = target self._ifconfig_entries(settings=network_state) self._route_entries(settings=network_state) self._resolve_conf(settings=network_state) diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py index 414b4830..466d472b 100644 --- a/tests/unittests/test_net_freebsd.py +++ b/tests/unittests/test_net_freebsd.py @@ -1,8 +1,24 @@ -from cloudinit import net +import os +import yaml + +import cloudinit.net +import cloudinit.net.network_state +from cloudinit.tests.helpers import (CiTestCase, mock, readResource, dir2dict) -from cloudinit.tests.helpers import (CiTestCase, mock, readResource) SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") +V1 = """ +config: +- id: eno1 + mac_address: 08:94:ef:51:ae:e0 + mtu: 1470 + name: eno1 + subnets: + - address: 172.20.80.129/25 + type: static + type: physical +version: 1 +""" class TestInterfacesByMac(CiTestCase): @@ -12,8 +28,49 @@ class TestInterfacesByMac(CiTestCase): def test_get_interfaces_by_mac(self, mock_is_FreeBSD, mock_subp): mock_is_FreeBSD.return_value = True mock_subp.return_value = 
(SAMPLE_FREEBSD_IFCONFIG_OUT, 0) - a = net.get_interfaces_by_mac() + a = cloudinit.net.get_interfaces_by_mac() assert a == {'52:54:00:50:b7:0d': 'vtnet0', '80:00:73:63:5c:48': 're0.33', '02:14:39:0e:25:00': 'bridge0', '02:ff:60:8c:f3:72': 'vnet0:11'} + + +class TestFreeBSDRoundTrip(CiTestCase): + + def _render_and_read(self, network_config=None, state=None, + netplan_path=None, target=None): + if target is None: + target = self.tmp_dir() + os.mkdir("%s/etc" % target) + with open("%s/etc/rc.conf" % target, 'a') as fd: + fd.write("# dummy rc.conf\n") + with open("%s/etc/resolv.conf" % target, 'a') as fd: + fd.write("# dummy resolv.conf\n") + + if network_config: + ns = cloudinit.net.network_state.parse_net_config_data( + network_config) + elif state: + ns = state + else: + raise ValueError("Expected data or state, got neither") + + renderer = cloudinit.net.freebsd.Renderer() + renderer.render_network_state(ns, target=target) + return dir2dict(target) + + @mock.patch('cloudinit.subp.subp') + def test_render_output_has_yaml(self, mock_subp): + + entry = { + 'yaml': V1, + } + network_config = yaml.load(entry['yaml']) + ns = cloudinit.net.network_state.parse_net_config_data(network_config) + files = self._render_and_read(state=ns) + assert files == { + '/etc/resolv.conf': '# dummy resolv.conf\n', + '/etc/rc.conf': ( + "# dummy rc.conf\n" + "ifconfig_eno1=" + "'172.20.80.129 netmask 255.255.255.128 mtu 1470'\n")} -- cgit v1.2.3 From 108611aee26e09bec683e6cf1b8e03bec9362de9 Mon Sep 17 00:00:00 2001 From: xiaofengw-vmware <42736879+xiaofengw-vmware@users.noreply.github.com> Date: Thu, 8 Jul 2021 23:14:33 +0800 Subject: VMware: new "allow_raw_data" switch (#939) Add a new switch allow_raw_data to control raw data feature, update the documentation. Fix bugs about max_wait. --- cloudinit/sources/DataSourceOVF.py | 35 +++++++++---- doc/rtd/topics/datasources/ovf.rst | 4 ++ tests/unittests/test_datasource/test_ovf.py | 79 ++++++++++++++++++++++++++--- 3 files changed, 100 insertions(+), 18 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index bbeada0b..9e83dccc 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -98,9 +98,20 @@ class DataSourceOVF(sources.DataSource): found.append(seed) elif system_type and 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") + allow_vmware_cust = False + allow_raw_data = False if not self.vmware_customization_supported: LOG.debug("Skipping the check for " "VMware Customization support") + else: + allow_vmware_cust = not util.get_cfg_option_bool( + self.sys_cfg, "disable_vmware_customization", True) + allow_raw_data = util.get_cfg_option_bool( + self.ds_cfg, "allow_raw_data", True) + + if not (allow_vmware_cust or allow_raw_data): + LOG.debug( + "Customization for VMware platform is disabled.") else: search_paths = ( "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", @@ -148,19 +159,21 @@ class DataSourceOVF(sources.DataSource): GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED, vmwareImcConfigFilePath, self._vmware_cust_conf) - else: - LOG.debug("Did not find VMware Customization Config File") - - # Honor disable_vmware_customization setting on metadata absent - if not md_path: - if util.get_cfg_option_bool(self.sys_cfg, - "disable_vmware_customization", - True): + # Don't handle the customization for below 2 cases: + # 1. meta data is found, allow_raw_data is False. + # 2. no meta data is found, allow_vmware_cust is False. 
+ if md_path and not allow_raw_data: LOG.debug( - "Customization for VMware platform is disabled.") + "Customization using raw data is disabled.") # reset vmwareImcConfigFilePath to None to avoid # customization for VMware platform vmwareImcConfigFilePath = None + if md_path is None and not allow_vmware_cust: + LOG.debug( + "Customization using VMware config is disabled.") + vmwareImcConfigFilePath = None + else: + LOG.debug("Did not find VMware Customization Config File") use_raw_data = bool(vmwareImcConfigFilePath and md_path) if use_raw_data: @@ -429,7 +442,7 @@ def get_max_wait_from_cfg(cfg): LOG.warning("Failed to get '%s', using %s", max_wait_cfg_option, default_max_wait) - if max_wait <= 0: + if max_wait < 0: LOG.warning("Invalid value '%s' for '%s', using '%s' instead", max_wait, max_wait_cfg_option, default_max_wait) max_wait = default_max_wait @@ -440,6 +453,8 @@ def get_max_wait_from_cfg(cfg): def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5, dirpath="/var/run/vmware-imc"): waited = 0 + if maxwait <= naplen: + naplen = 1 while waited < maxwait: fileFullPath = os.path.join(dirpath, filename) diff --git a/doc/rtd/topics/datasources/ovf.rst b/doc/rtd/topics/datasources/ovf.rst index 43ee45ba..bd5df860 100644 --- a/doc/rtd/topics/datasources/ovf.rst +++ b/doc/rtd/topics/datasources/ovf.rst @@ -18,6 +18,10 @@ configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). The settings that may be configured are: + * disable_vmware_customization: disable or enable the vmware customization + based on vmware customization files. (default: True) + * allow_raw_data: enable or disable the vmware customization based on raw + cloud-init data including metadata and userdata. (default: True) * vmware_cust_file_max_wait: the maximum amount of clock time in seconds that should be spent waiting for vmware customization files. (default: 15) diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index dce01f5d..e2718077 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -138,18 +138,17 @@ class TestDatasourceOVF(CiTestCase): self.assertIn( 'DEBUG: No system-product-name found', self.logs.getvalue()) - def test_get_data_no_vmware_customization_disabled(self): - """When cloud-init workflow for vmware is disabled via sys_cfg and - no meta data provided, log a message. + def test_get_data_vmware_customization_disabled(self): + """When vmware customization is disabled via sys_cfg and + allow_raw_data is disabled via ds_cfg, log a message. """ paths = Paths({'cloud_dir': self.tdir}) ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) + sys_cfg={'disable_vmware_customization': True, + 'datasource': {'OVF': {'allow_raw_data': False}}}, + distro={}, paths=paths) conf_file = self.tmp_path('test-cust', self.tdir) conf_content = dedent("""\ - [CUSTOM-SCRIPT] - SCRIPT-NAME = test-script [MISC] MARKER-ID = 12345345 """) @@ -168,7 +167,71 @@ class TestDatasourceOVF(CiTestCase): 'DEBUG: Customization for VMware platform is disabled.', self.logs.getvalue()) - def test_get_data_vmware_customization_disabled(self): + def test_get_data_vmware_customization_sys_cfg_disabled(self): + """When vmware customization is disabled via sys_cfg and + no meta data is found, log a message. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True, + 'datasource': {'OVF': {'allow_raw_data': True}}}, + distro={}, paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization using VMware config is disabled.', + self.logs.getvalue()) + + def test_get_data_allow_raw_data_disabled(self): + """When allow_raw_data is disabled via ds_cfg and + meta data is found, log a message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False, + 'datasource': {'OVF': {'allow_raw_data': False}}}, + distro={}, paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + util.write_file(metadata_file, "This is meta data") + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization using raw data is disabled.', + self.logs.getvalue()) + + def test_get_data_vmware_customization_enabled(self): """When cloud-init workflow for vmware is enabled via sys_cfg log a message. """ -- cgit v1.2.3 From 9b52405c6f0de5e00d5ee9c1d13540425d8f6bf5 Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Mon, 12 Jul 2021 20:21:02 +0200 Subject: ssh-util: allow cloudinit to merge all ssh keys into a custom user file, defined in AuthorizedKeysFile (#937) This patch aims to fix LP1911680, by analyzing the files provided in sshd_config and merge all keys into an user-specific file. Also introduces additional tests to cover this specific case. The file is picked by analyzing the path given in AuthorizedKeysFile. If it points inside the current user folder (path is /home/user/*), it means it is an user-specific file, so we can copy all user-keys there. If it contains a %u or %h, it means that there will be a specific authorized_keys file for each user, so we can copy all user-keys there. If no path points to an user-specific file, for example when only /etc/ssh/authorized_keys is given, default to ~/.ssh/authorized_keys. Note that if there are more than a single user-specific file, the last one will be picked. 
Signed-off-by: Emanuele Giuseppe Esposito Co-authored-by: James Falcon LP: #1911680 RHBZ:1862967 --- cloudinit/ssh_util.py | 22 +- tests/integration_tests/assets/keys/id_rsa.test1 | 38 ++++ .../integration_tests/assets/keys/id_rsa.test1.pub | 1 + tests/integration_tests/assets/keys/id_rsa.test2 | 38 ++++ .../integration_tests/assets/keys/id_rsa.test2.pub | 1 + tests/integration_tests/assets/keys/id_rsa.test3 | 38 ++++ .../integration_tests/assets/keys/id_rsa.test3.pub | 1 + .../integration_tests/modules/test_ssh_keysfile.py | 85 +++++++ tests/integration_tests/util.py | 15 +- tests/unittests/test_sshutil.py | 246 ++++++++++++++++++++- 10 files changed, 470 insertions(+), 15 deletions(-) create mode 100644 tests/integration_tests/assets/keys/id_rsa.test1 create mode 100644 tests/integration_tests/assets/keys/id_rsa.test1.pub create mode 100644 tests/integration_tests/assets/keys/id_rsa.test2 create mode 100644 tests/integration_tests/assets/keys/id_rsa.test2.pub create mode 100644 tests/integration_tests/assets/keys/id_rsa.test3 create mode 100644 tests/integration_tests/assets/keys/id_rsa.test3.pub create mode 100644 tests/integration_tests/modules/test_ssh_keysfile.py (limited to 'cloudinit') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index c08042d6..89057262 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -252,13 +252,15 @@ def render_authorizedkeysfile_paths(value, homedir, username): def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): (ssh_dir, pw_ent) = users_ssh_info(username) default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys') + user_authorizedkeys_file = default_authorizedkeys_file auth_key_fns = [] with util.SeLinuxGuard(ssh_dir, recursive=True): try: ssh_cfg = parse_ssh_config_map(sshd_cfg_file) + key_paths = ssh_cfg.get("authorizedkeysfile", + "%h/.ssh/authorized_keys") auth_key_fns = render_authorizedkeysfile_paths( - ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys"), - pw_ent.pw_dir, username) + key_paths, pw_ent.pw_dir, username) except (IOError, OSError): # Give up and use a default key filename @@ -267,8 +269,22 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): "config from %r, using 'AuthorizedKeysFile' file " "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) + # check if one of the keys is the user's one + for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns): + if any([ + '%u' in key_path, + '%h' in key_path, + auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir)) + ]): + user_authorizedkeys_file = auth_key_fn + + if user_authorizedkeys_file != default_authorizedkeys_file: + LOG.debug( + "AuthorizedKeysFile has an user-specific authorized_keys, " + "using %s", user_authorizedkeys_file) + # always store all the keys in the user's private file - return (default_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) + return (user_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) def setup_user_keys(keys, username, options=None): diff --git a/tests/integration_tests/assets/keys/id_rsa.test1 b/tests/integration_tests/assets/keys/id_rsa.test1 new file mode 100644 index 00000000..bd4c822e --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test1 @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn +NhAAAAAwEAAQAAAYEAtRlG96aJ23URvAgO/bBsuLl+lquc350aSwV98/i8vlvOn5GVcHye +t/rXQg4lZ4s0owG3kWyQFY8nvTk+G+UNU8fN0anAzBDi+4MzsejkF9scjTMFmXVrIpICqV 
+3bYQNjPv6r+ubQdkD01du3eB9t5/zl84gtshp0hBdofyz8u1/A25s7fVU67GyI7PdKvaS+ +yvJSInZnb2e9VQzfJC+qAnN7gUZatBKjdgUtJeiUUeDaVnaS17b0aoT9iBO0sIcQtOTBlY +lCjFt1TAMLZ64Hj3SfGZB7Yj0Z+LzFB2IWX1zzsjI68YkYPKOSL/NYhQU9e55kJQ7WnngN +HY/2n/A7dNKSFDmgM5c9IWgeZ7fjpsfIYAoJ/CAxFIND+PEHd1gCS6xoEhaUVyh5WH/Xkw +Kv1nx4AiZ2BFCE+75kySRLZUJ+5y0r3DU5ktMXeURzVIP7pu0R8DCul+GU+M/+THyWtAEO +geaNJ6fYpo2ipDhbmTYt3kk2lMIapRxGBFs+37sdAAAFgGGJssNhibLDAAAAB3NzaC1yc2 +EAAAGBALUZRvemidt1EbwIDv2wbLi5fparnN+dGksFffP4vL5bzp+RlXB8nrf610IOJWeL +NKMBt5FskBWPJ705PhvlDVPHzdGpwMwQ4vuDM7Ho5BfbHI0zBZl1ayKSAqld22EDYz7+q/ +rm0HZA9NXbt3gfbef85fOILbIadIQXaH8s/LtfwNubO31VOuxsiOz3Sr2kvsryUiJ2Z29n +vVUM3yQvqgJze4FGWrQSo3YFLSXolFHg2lZ2kte29GqE/YgTtLCHELTkwZWJQoxbdUwDC2 +euB490nxmQe2I9Gfi8xQdiFl9c87IyOvGJGDyjki/zWIUFPXueZCUO1p54DR2P9p/wO3TS +khQ5oDOXPSFoHme346bHyGAKCfwgMRSDQ/jxB3dYAkusaBIWlFcoeVh/15MCr9Z8eAImdg +RQhPu+ZMkkS2VCfuctK9w1OZLTF3lEc1SD+6btEfAwrpfhlPjP/kx8lrQBDoHmjSen2KaN +oqQ4W5k2Ld5JNpTCGqUcRgRbPt+7HQAAAAMBAAEAAAGBAJJCTOd70AC2ptEGbR0EHHqADT +Wgefy7A94tHFEqxTy0JscGq/uCGimaY7kMdbcPXT59B4VieWeAC2cuUPP0ZHQSfS5ke7oT +tU3N47U+0uBVbNS4rUAH7bOo2o9wptnOA5x/z+O+AARRZ6tEXQOd1oSy4gByLf2Wkh2QTi +vP6Hln1vlFgKEzcXg6G8fN3MYWxKRhWmZM3DLERMvorlqqSBLcs5VvfZfLKcsKWTExioAq +KgwEjYm8T9+rcpsw1xBus3j9k7wCI1Sus6PCDjq0pcYKLMYM7p8ygnU2tRYrOztdIxgWRA +w/1oenm1Mqq2tV5xJcBCwCLOGe6SFwkIRywOYc57j5McH98Xhhg9cViyyBdXy/baF0mro+ +qPhOsWDxqwD4VKZ9UmQ6O8kPNKcc7QcIpFJhcO0g9zbp/MT0KueaWYrTKs8y4lUkTT7Xz6 ++MzlR122/JwlAbBo6Y2kWtB+y+XwBZ0BfyJsm2czDhKm7OI5KfuBNhq0tFfKwOlYBq4QAA +AMAyvUof1R8LLISkdO3EFTKn5RGNkPPoBJmGs6LwvU7NSjjLj/wPQe4jsIBc585tvbrddp +60h72HgkZ5tqOfdeBYOKqX0qQQBHUEvI6M+NeQTQRev8bCHMLXQ21vzpClnrwNzlja359E +uTRfiPRwIlyPLhOUiClBDSAnBI9h82Hkk3zzsQ/xGfsPB7iOjRbW69bMRSVCRpeweCVmWC +77DTsEOq69V2TdljhQNIXE5OcOWonIlfgPiI74cdd+dLhzc/AAAADBAO1/JXd2kYiRyNkZ +aXTLcwiSgBQIYbobqVP3OEtTclr0P1JAvby3Y4cCaEhkenx+fBqgXAku5lKM+U1Q9AEsMk +cjIhaDpb43rU7GPjMn4zHwgGsEKd5pC1yIQ2PlK+cHanAdsDjIg+6RR+fuvid/mBeBOYXb +Py0sa3HyekLJmCdx4UEyNASoiNaGFLQVAqo+RACsXy6VMxFH5dqDYlvwrfUQLwxJmse9Vb +GEuuPAsklNugZqssC2XOIujFVUpslduQAAAMEAwzVHQVtsc3icCSzEAARpDTUdTbI29OhB +/FMBnjzS9/3SWfLuBOSm9heNCHs2jdGNb8cPdKZuY7S9Fx6KuVUPyTbSSYkjj0F4fTeC9g +0ym4p4UWYdF67WSWwLORkaG8K0d+G/CXkz8hvKUg6gcZWKBHAE1ROrHu1nsc8v7mkiKq4I +bnTw5Q9TgjbWcQWtgPq0wXyyl/K8S1SFdkMCTOHDD0RQ+jTV2WNGVwFTodIRHenX+Rw2g4 +CHbTWbsFrHR1qFAAAACmphbWVzQG5ld3Q= +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/integration_tests/assets/keys/id_rsa.test1.pub b/tests/integration_tests/assets/keys/id_rsa.test1.pub new file mode 100644 index 00000000..3d2e26e1 --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test1.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC1GUb3ponbdRG8CA79sGy4uX6Wq5zfnRpLBX3z+Ly+W86fkZVwfJ63+tdCDiVnizSjAbeRbJAVjye9OT4b5Q1Tx83RqcDMEOL7gzOx6OQX2xyNMwWZdWsikgKpXdthA2M+/qv65tB2QPTV27d4H23n/OXziC2yGnSEF2h/LPy7X8Dbmzt9VTrsbIjs90q9pL7K8lIidmdvZ71VDN8kL6oCc3uBRlq0EqN2BS0l6JRR4NpWdpLXtvRqhP2IE7SwhxC05MGViUKMW3VMAwtnrgePdJ8ZkHtiPRn4vMUHYhZfXPOyMjrxiRg8o5Iv81iFBT17nmQlDtaeeA0dj/af8Dt00pIUOaAzlz0haB5nt+Omx8hgCgn8IDEUg0P48Qd3WAJLrGgSFpRXKHlYf9eTAq/WfHgCJnYEUIT7vmTJJEtlQn7nLSvcNTmS0xd5RHNUg/um7RHwMK6X4ZT4z/5MfJa0AQ6B5o0np9imjaKkOFuZNi3eSTaUwhqlHEYEWz7fux0= test1@host diff --git a/tests/integration_tests/assets/keys/id_rsa.test2 b/tests/integration_tests/assets/keys/id_rsa.test2 new file mode 100644 index 00000000..5854d901 --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test2 @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn 
+NhAAAAAwEAAQAAAYEAvK50D2PWOc4ikyHVRJS6tDhqzjL5cKiivID4p1X8BYCVw83XAEGO +LnItUyVXHNADlh6fpVq1NY6A2JVtygoPF6ZFx8ph7IWMmnhDdnxLLyGsbhd1M1tiXJD/R+ +3WnGHRJ4PKrQavMLgqHRrieV3QVVfjFSeo6jX/4TruP6ZmvITMZWJrXaGphxJ/pPykEdkO +i8AmKU9FNviojyPS2nNtj9B/635IdgWvrd7Vf5Ycsw9MR55LWSidwa856RH62Yl6LpEGTH +m1lJiMk1u88JPSqvohhaUkLKkFpcQwcB0m76W1KOyllJsmX8bNXrlZsI+WiiYI7Xl5vQm2 +17DEuNeavtPAtDMxu8HmTg2UJ55Naxehbfe2lx2k5kYGGw3i1O1OVN2pZ2/OB71LucYd/5 +qxPaz03wswcGOJYGPkNc40vdES/Scc7Yt8HsnZuzqkyOgzn0HiUCzoYUYLYTpLf+yGmwxS +yAEY056aOfkCsboKHOKiOmlJxNaZZFQkX1evep4DAAAFgC7HMbUuxzG1AAAAB3NzaC1yc2 +EAAAGBALyudA9j1jnOIpMh1USUurQ4as4y+XCooryA+KdV/AWAlcPN1wBBji5yLVMlVxzQ +A5Yen6VatTWOgNiVbcoKDxemRcfKYeyFjJp4Q3Z8Sy8hrG4XdTNbYlyQ/0ft1pxh0SeDyq +0GrzC4Kh0a4nld0FVX4xUnqOo1/+E67j+mZryEzGVia12hqYcSf6T8pBHZDovAJilPRTb4 +qI8j0tpzbY/Qf+t+SHYFr63e1X+WHLMPTEeeS1koncGvOekR+tmJei6RBkx5tZSYjJNbvP +CT0qr6IYWlJCypBaXEMHAdJu+ltSjspZSbJl/GzV65WbCPloomCO15eb0JttewxLjXmr7T +wLQzMbvB5k4NlCeeTWsXoW33tpcdpOZGBhsN4tTtTlTdqWdvzge9S7nGHf+asT2s9N8LMH +BjiWBj5DXONL3REv0nHO2LfB7J2bs6pMjoM59B4lAs6GFGC2E6S3/shpsMUsgBGNOemjn5 +ArG6ChziojppScTWmWRUJF9Xr3qeAwAAAAMBAAEAAAGASj/kkEHbhbfmxzujL2/P4Sfqb+ +aDXqAeGkwujbs6h/fH99vC5ejmSMTJrVSeaUo6fxLiBDIj6UWA0rpLEBzRP59BCpRL4MXV +RNxav/+9nniD4Hb+ug0WMhMlQmsH71ZW9lPYqCpfOq7ec8GmqdgPKeaCCEspH7HMVhfYtd +eHylwAC02lrpz1l5/h900sS5G9NaWR3uPA+xbzThDs4uZVkSidjlCNt1QZhDSSk7jA5n34 +qJ5UTGu9WQDZqyxWKND+RIyQuFAPGQyoyCC1FayHO2sEhT5qHuumL14Mn81XpzoXFoKyql +rhBDe+pHhKArBYt92Evch0k1ABKblFxtxLXcvk4Fs7pHi+8k4+Cnazej2kcsu1kURlMZJB +w2QT/8BV4uImbH05LtyscQuwGzpIoxqrnHrvg5VbohStmhoOjYybzqqW3/M0qhkn5JgTiy +dJcHRJisRnAcmbmEchYtLDi6RW1e022H4I9AFXQqyr5HylBq6ugtWcFCsrcX8ibZ8xAAAA +wQCAOPgwae6yZLkrYzRfbxZtGKNmhpI0EtNSDCHYuQQapFZJe7EFENs/VAaIiiut0yajGj +c3aoKcwGIoT8TUM8E3GSNW6+WidUOC7H6W+/6N2OYZHRBACGz820xO+UBCl2oSk+dLBlfr +IQzBGUWn5uVYCs0/2nxfCdFyHtMK8dMF/ypbdG+o1rXz5y9b7PVG6Mn+o1Rjsdkq7VERmy +Pukd8hwATOIJqoKl3TuFyBeYFLqe+0e7uTeswQFw17PF31VjAAAADBAOpJRQb8c6qWqsvv +vkve0uMuL0DfWW0G6+SxjPLcV6aTWL5xu0Grd8uBxDkkHU/CDrAwpchXyuLsvbw21Eje/u +U5k9nLEscWZwcX7odxlK+EfAY2Bf5+Hd9bH5HMzTRJH8KkWK1EppOLPyiDxz4LZGzPLVyv +/1PgSuvXkSWk1KIE4SvSemyxGX2tPVI6uO+URqevfnPOS1tMB7BMQlgkR6eh4bugx9UYx9 +mwlXonNa4dN0iQxZ7N4rKFBbT/uyB2bQAAAMEAzisnkD8k9Tn8uyhxpWLHwb03X4ZUUHDV +zu15e4a8dZ+mM8nHO986913Xz5JujlJKkGwFTvgWkIiR2zqTEauZHARH7gANpaweTm6lPd +E4p2S0M3ulY7xtp9lCFIrDhMPPkGq8SFZB6qhgucHcZSRLq6ZDou3S2IdNOzDTpBtkhRCS +0zFcdTLh3zZweoy8HGbW36bwB6s1CIL76Pd4F64i0Ms9CCCU6b+E5ArFhYQIsXiDbgHWbD +tZRSm2GEgnDGAvAAAACmphbWVzQG5ld3Q= +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/integration_tests/assets/keys/id_rsa.test2.pub b/tests/integration_tests/assets/keys/id_rsa.test2.pub new file mode 100644 index 00000000..f3831a57 --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test2.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC8rnQPY9Y5ziKTIdVElLq0OGrOMvlwqKK8gPinVfwFgJXDzdcAQY4uci1TJVcc0AOWHp+lWrU1joDYlW3KCg8XpkXHymHshYyaeEN2fEsvIaxuF3UzW2JckP9H7dacYdEng8qtBq8wuCodGuJ5XdBVV+MVJ6jqNf/hOu4/pma8hMxlYmtdoamHEn+k/KQR2Q6LwCYpT0U2+KiPI9Lac22P0H/rfkh2Ba+t3tV/lhyzD0xHnktZKJ3BrznpEfrZiXoukQZMebWUmIyTW7zwk9Kq+iGFpSQsqQWlxDBwHSbvpbUo7KWUmyZfxs1euVmwj5aKJgjteXm9CbbXsMS415q+08C0MzG7weZODZQnnk1rF6Ft97aXHaTmRgYbDeLU7U5U3alnb84HvUu5xh3/mrE9rPTfCzBwY4lgY+Q1zjS90RL9Jxzti3weydm7OqTI6DOfQeJQLOhhRgthOkt/7IabDFLIARjTnpo5+QKxugoc4qI6aUnE1plkVCRfV696ngM= test2@host diff --git a/tests/integration_tests/assets/keys/id_rsa.test3 b/tests/integration_tests/assets/keys/id_rsa.test3 new file mode 100644 index 00000000..2596c762 --- /dev/null +++ 
b/tests/integration_tests/assets/keys/id_rsa.test3 @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn +NhAAAAAwEAAQAAAYEApPG4MdkYQKD57/qreFrh9GRC22y66qZOWZWRjC887rrbvBzO69hV +yJpTIXleJEvpWiHYcjMR5G6NNFsnNtZ4fxDqmSc4vcFj53JsE/XNqLKq6psXadCb5vkNpG +bxA+Z5bJlzJ969PgJIIEbgc86sei4kgR2MuPWqtZbY5GkpNCTqWuLYeFK+14oFruA2nyWH +9MOIRDHK/d597psHy+LTMtymO7ZPhO571abKw6jvvwiSeDxVE9kV7KAQIuM9/S3gftvgQQ +ron3GL34pgmIabdSGdbfHqGDooryJhlbquJZELBN236KgRNTCAjVvUzjjQr1eRP3xssGwV +O6ECBGCQLl/aYogAgtwnwj9iXqtfiLK3EwlgjquU4+JQ0CVtLhG3gIZB+qoMThco0pmHTr +jtfQCwrztsBBFunSa2/CstuV1mQ5O5ZrZ6ACo9yPRBNkns6+CiKdtMtCtzi3k2RDz9jpYm +Pcak03Lr7IkdC1Tp6+jA+//yPHSO1o4CqW89IQzNAAAFgEUd7lZFHe5WAAAAB3NzaC1yc2 +EAAAGBAKTxuDHZGECg+e/6q3ha4fRkQttsuuqmTlmVkYwvPO6627wczuvYVciaUyF5XiRL +6Voh2HIzEeRujTRbJzbWeH8Q6pknOL3BY+dybBP1zaiyquqbF2nQm+b5DaRm8QPmeWyZcy +fevT4CSCBG4HPOrHouJIEdjLj1qrWW2ORpKTQk6lri2HhSvteKBa7gNp8lh/TDiEQxyv3e +fe6bB8vi0zLcpju2T4Tue9WmysOo778Ikng8VRPZFeygECLjPf0t4H7b4EEK6J9xi9+KYJ +iGm3UhnW3x6hg6KK8iYZW6riWRCwTdt+ioETUwgI1b1M440K9XkT98bLBsFTuhAgRgkC5f +2mKIAILcJ8I/Yl6rX4iytxMJYI6rlOPiUNAlbS4Rt4CGQfqqDE4XKNKZh0647X0AsK87bA +QRbp0mtvwrLbldZkOTuWa2egAqPcj0QTZJ7OvgoinbTLQrc4t5NkQ8/Y6WJj3GpNNy6+yJ +HQtU6evowPv/8jx0jtaOAqlvPSEMzQAAAAMBAAEAAAGAGaqbdPZJNdVWzyb8g6/wtSzc0n +Qq6dSTIJGLonq/So69HpqFAGIbhymsger24UMGvsXBfpO/1wH06w68HWZmPa+OMeLOi4iK +WTuO4dQ/+l5DBlq32/lgKSLcIpb6LhcxEdsW9j9Mx1dnjc45owun/yMq/wRwH1/q/nLIsV +JD3R9ZcGcYNDD8DWIm3D17gmw+qbG7hJES+0oh4n0xS2KyZpm7LFOEMDVEA8z+hE/HbryQ +vjD1NC91n+qQWD1wKfN3WZDRwip3z1I5VHMpvXrA/spHpa9gzHK5qXNmZSz3/dfA1zHjCR +2dHjJnrIUH8nyPfw8t+COC+sQBL3Nr0KUWEFPRM08cOcQm4ctzg17aDIZBONjlZGKlReR8 +1zfAw84Q70q2spLWLBLXSFblHkaOfijEbejIbaz2UUEQT27WD7RHAORdQlkx7eitk66T9d +DzIq/cpYhm5Fs8KZsh3PLldp9nsHbD2Oa9J9LJyI4ryuIW0mVwRdvPSiiYi3K+mDCpAAAA +wBe+ugEEJ+V7orb1f4Zez0Bd4FNkEc52WZL4CWbaCtM+ZBg5KnQ6xW14JdC8IS9cNi/I5P +yLsBvG4bWPLGgQruuKY6oLueD6BFnKjqF6ACUCiSQldh4BAW1nYc2U48+FFvo3ZQyudFSy +QEFlhHmcaNMDo0AIJY5Xnq2BG3nEX7AqdtZ8hhenHwLCRQJatDwSYBHDpSDdh9vpTnGp/2 +0jBz25Ko4UANzvSAc3sA4yN3jfpoM366TgdNf8x3g1v7yljQAAAMEA0HSQjzH5nhEwB58k +mYYxnBYp1wb86zIuVhAyjZaeinvBQSTmLow8sXIHcCVuD3CgBezlU2SX5d9YuvRU9rcthi +uzn4wWnbnzYy4SwzkMJXchUAkumFVD8Hq5TNPh2Z+033rLLE08EhYypSeVpuzdpFoStaS9 +3DUZA2bR/zLZI9MOVZRUcYImNegqIjOYHY8Sbj3/0QPV6+WpUJFMPvvedWhfaOsRMTA6nr +VLG4pxkrieVl0UtuRGbzD/exXhXVi7AAAAwQDKkJj4ez/+KZFYlZQKiV0BrfUFcgS6ElFM +2CZIEagCtu8eedrwkNqx2FUX33uxdvUTr4c9I3NvWeEEGTB9pgD4lh1x/nxfuhyGXtimFM +GnznGV9oyz0DmKlKiKSEGwWf5G+/NiiCwwVJ7wsQQm7TqNtkQ9b8MhWWXC7xlXKUs7dmTa +e8AqAndCCMEnbS1UQFO/R5PNcZXkFWDggLQ/eWRYKlrXgdnUgH6h0saOcViKpNJBUXb3+x +eauhOY52PS/BcAAAAKamFtZXNAbmV3dAE= +-----END OPENSSH PRIVATE KEY----- diff --git a/tests/integration_tests/assets/keys/id_rsa.test3.pub b/tests/integration_tests/assets/keys/id_rsa.test3.pub new file mode 100644 index 00000000..057db632 --- /dev/null +++ b/tests/integration_tests/assets/keys/id_rsa.test3.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCk8bgx2RhAoPnv+qt4WuH0ZELbbLrqpk5ZlZGMLzzuutu8HM7r2FXImlMheV4kS+laIdhyMxHkbo00Wyc21nh/EOqZJzi9wWPncmwT9c2osqrqmxdp0Jvm+Q2kZvED5nlsmXMn3r0+AkggRuBzzqx6LiSBHYy49aq1ltjkaSk0JOpa4th4Ur7XigWu4DafJYf0w4hEMcr93n3umwfL4tMy3KY7tk+E7nvVpsrDqO+/CJJ4PFUT2RXsoBAi4z39LeB+2+BBCuifcYvfimCYhpt1IZ1t8eoYOiivImGVuq4lkQsE3bfoqBE1MICNW9TOONCvV5E/fGywbBU7oQIEYJAuX9piiACC3CfCP2Jeq1+IsrcTCWCOq5Tj4lDQJW0uEbeAhkH6qgxOFyjSmYdOuO19ALCvO2wEEW6dJrb8Ky25XWZDk7lmtnoAKj3I9EE2Sezr4KIp20y0K3OLeTZEPP2OliY9xqTTcuvsiR0LVOnr6MD7//I8dI7WjgKpbz0hDM0= test3@host diff --git 
a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py new file mode 100644 index 00000000..f82d7649 --- /dev/null +++ b/tests/integration_tests/modules/test_ssh_keysfile.py @@ -0,0 +1,85 @@ +import paramiko +import pytest +from io import StringIO +from paramiko.ssh_exception import SSHException + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import get_test_rsa_keypair + +TEST_USER1_KEYS = get_test_rsa_keypair('test1') +TEST_USER2_KEYS = get_test_rsa_keypair('test2') +TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3') + +USERDATA = """\ +#cloud-config +bootcmd: + - sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile /etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' /etc/ssh/sshd_config +ssh_authorized_keys: + - {default} +users: +- default +- name: test_user1 + ssh_authorized_keys: + - {user1} +- name: test_user2 + ssh_authorized_keys: + - {user2} +""".format( # noqa: E501 + default=TEST_DEFAULT_KEYS.public_key, + user1=TEST_USER1_KEYS.public_key, + user2=TEST_USER2_KEYS.public_key, +) + + +@pytest.mark.ubuntu +@pytest.mark.user_data(USERDATA) +def test_authorized_keys(client: IntegrationInstance): + expected_keys = [ + ('test_user1', '/home/test_user1/.ssh/authorized_keys2', + TEST_USER1_KEYS), + ('test_user2', '/home/test_user2/.ssh/authorized_keys2', + TEST_USER2_KEYS), + ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', + TEST_DEFAULT_KEYS), + ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), + ] + + for user, filename, keys in expected_keys: + contents = client.read_from_file(filename) + if user in ['ubuntu', 'root']: + # Our personal public key gets added by pycloudlib + lines = contents.split('\n') + assert len(lines) == 2 + assert keys.public_key.strip() in contents + else: + assert contents.strip() == keys.public_key.strip() + + # Ensure we can actually connect + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + paramiko_key = paramiko.RSAKey.from_private_key(StringIO( + keys.private_key)) + + # Will fail with AuthenticationException if + # we cannot connect + ssh.connect( + client.instance.ip, + username=user, + pkey=paramiko_key, + look_for_keys=False, + allow_agent=False, + ) + + # Ensure other users can't connect using our key + other_users = [u[0] for u in expected_keys if u[2] != keys] + for other_user in other_users: + with pytest.raises(SSHException): + print('trying to connect as {} with key from {}'.format( + other_user, user)) + ssh.connect( + client.instance.ip, + username=other_user, + pkey=paramiko_key, + look_for_keys=False, + allow_agent=False, + ) diff --git a/tests/integration_tests/util.py b/tests/integration_tests/util.py index 8d726bb2..ce62ffc8 100644 --- a/tests/integration_tests/util.py +++ b/tests/integration_tests/util.py @@ -3,12 +3,15 @@ import multiprocessing import os import time from contextlib import contextmanager +from collections import namedtuple from pathlib import Path -log = logging.getLogger('integration_testing') +log = logging.getLogger('integration_testing') +key_pair = namedtuple('key_pair', 'public_key private_key') ASSETS_DIR = Path('tests/integration_tests/assets') +KEY_PATH = ASSETS_DIR / 'keys' def verify_ordered_items_in_text(to_verify: list, text: str): @@ -51,3 +54,13 @@ def emit_dots_on_travis(): yield finally: dot_process.terminate() + + +def get_test_rsa_keypair(key_name: str = 'test1') -> key_pair: + private_key_path = KEY_PATH / 'id_rsa.{}'.format(key_name) +
public_key_path = KEY_PATH / 'id_rsa.{}.pub'.format(key_name) + with public_key_path.open() as public_file: + public_key = public_file.read() + with private_key_path.open() as private_file: + private_key = private_file.read() + return key_pair(public_key, private_key) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index fd1d1bac..bcb8044f 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -570,20 +570,33 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): ssh_util.render_authorizedkeysfile_paths( "%h/.keys", "/homedirs/bobby", "bobby")) + def test_all(self): + self.assertEqual( + ["/homedirs/bobby/.keys", "/homedirs/bobby/.secret/keys", + "/keys/path1", "/opt/bobby/keys"], + ssh_util.render_authorizedkeysfile_paths( + "%h/.keys .secret/keys /keys/path1 /opt/%u/keys", + "/homedirs/bobby", "bobby")) + class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): @patch("cloudinit.ssh_util.pwd.getpwnam") def test_multiple_authorizedkeys_file_order1(self, m_getpwnam): - fpw = FakePwEnt(pw_name='bobby', pw_dir='/home2/bobby') + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') m_getpwnam.return_value = fpw - authorized_keys = self.tmp_path('authorized_keys') + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + + # /tmp/home2/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) util.write_file(authorized_keys, VALID_CONTENT['rsa']) - user_keys = self.tmp_path('user_keys') + # /tmp/home2/bobby/.ssh/user_keys = dsa + user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) util.write_file(user_keys, VALID_CONTENT['dsa']) - sshd_config = self.tmp_path('sshd_config') + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") util.write_file( sshd_config, "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) @@ -593,33 +606,244 @@ class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): fpw.pw_name, sshd_config) content = ssh_util.update_authorized_keys(auth_key_entries, []) - self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) + self.assertEqual(user_keys, auth_key_fn) self.assertTrue(VALID_CONTENT['rsa'] in content) self.assertTrue(VALID_CONTENT['dsa'] in content) @patch("cloudinit.ssh_util.pwd.getpwnam") def test_multiple_authorizedkeys_file_order2(self, m_getpwnam): - fpw = FakePwEnt(pw_name='suzie', pw_dir='/home/suzie') + fpw = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') m_getpwnam.return_value = fpw - authorized_keys = self.tmp_path('authorized_keys') + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + + # /tmp/home/suzie/.ssh/authorized_keys = rsa + authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) util.write_file(authorized_keys, VALID_CONTENT['rsa']) - user_keys = self.tmp_path('user_keys') + # /tmp/home/suzie/.ssh/user_keys = dsa + user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) util.write_file(user_keys, VALID_CONTENT['dsa']) - sshd_config = self.tmp_path('sshd_config') + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") util.write_file( sshd_config, - "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) + "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys) ) (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(authorized_keys, auth_key_fn) + 
self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_local_global(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') + m_getpwnam.return_value = fpw + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + + # /tmp/home2/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + + # /tmp/home2/bobby/.ssh/user_keys = dsa + user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, + "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, + user_keys, authorized_keys) + ) + + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(authorized_keys, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_local_global2(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') + m_getpwnam.return_value = fpw + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + + # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa + authorized_keys = self.tmp_path('authorized_keys2', + dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + + # /tmp/home2/bobby/.ssh/user_keys3 = dsa + user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, + "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, + authorized_keys, user_keys) + ) + + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(user_keys, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_global(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') + m_getpwnam.return_value = fpw + + # /tmp/etc/ssh/authorized_keys = rsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['rsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config') + util.write_file( + sshd_config, + "AuthorizedKeysFile %s" % (authorized_keys_global) ) + + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) content = ssh_util.update_authorized_keys(auth_key_entries, []) 
self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) self.assertTrue(VALID_CONTENT['rsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_multiuser(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') + m_getpwnam.return_value = fpw + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa + authorized_keys = self.tmp_path('authorized_keys2', + dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + # /tmp/home2/bobby/.ssh/user_keys3 = dsa + user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + + fpw2 = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') + user_ssh_folder = "%s/.ssh" % fpw2.pw_dir + # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com + authorized_keys2 = self.tmp_path('authorized_keys2', + dir=user_ssh_folder) + util.write_file(authorized_keys2, + VALID_CONTENT['ssh-xmss@openssh.com']) + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, + "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s" % + (authorized_keys_global, user_keys) + ) + + # process first user + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(user_keys, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + self.assertFalse(VALID_CONTENT['ssh-xmss@openssh.com'] in content) + + m_getpwnam.return_value = fpw2 + # process second user + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw2.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(authorized_keys2, auth_key_fn) + self.assertTrue(VALID_CONTENT['ssh-xmss@openssh.com'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + self.assertFalse(VALID_CONTENT['rsa'] in content) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + def test_multiple_authorizedkeys_file_multiuser2(self, m_getpwnam): + fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home/bobby') + m_getpwnam.return_value = fpw + user_ssh_folder = "%s/.ssh" % fpw.pw_dir + # /tmp/home/bobby/.ssh/authorized_keys2 = rsa + authorized_keys = self.tmp_path('authorized_keys2', + dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT['rsa']) + # /tmp/home/bobby/.ssh/user_keys3 = dsa + user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) + util.write_file(user_keys, VALID_CONTENT['dsa']) + + fpw2 = FakePwEnt(pw_name='badguy', pw_dir='/tmp/home/badguy') + user_ssh_folder = "%s/.ssh" % fpw2.pw_dir + # /tmp/home/badguy/home/bobby = "" + authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy") + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', + dir="/tmp") + util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + + # /tmp/sshd_config + sshd_config = self.tmp_path('sshd_config', dir="/tmp") + util.write_file( + sshd_config, + "AuthorizedKeysFile %s 
%%h/.ssh/authorized_keys2 %s %s" % + (authorized_keys_global, user_keys, authorized_keys2) + ) + + # process first user + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + self.assertEqual(user_keys, auth_key_fn) + self.assertTrue(VALID_CONTENT['rsa'] in content) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) + self.assertTrue(VALID_CONTENT['dsa'] in content) + + m_getpwnam.return_value = fpw2 + # process second user + (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( + fpw2.pw_name, sshd_config) + content = ssh_util.update_authorized_keys(auth_key_entries, []) + + # badguy should not take the key from the other user! + self.assertEqual(authorized_keys2, auth_key_fn) + self.assertTrue(VALID_CONTENT['ecdsa'] in content) self.assertTrue(VALID_CONTENT['dsa'] in content) + self.assertFalse(VALID_CONTENT['rsa'] in content) # vi: ts=4 expandtab -- cgit v1.2.3 From eacb0353803263934aa2ac827c37e461c87cb107 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 15 Jul 2021 17:52:21 -0500 Subject: Fix MIME policy failure on python version upgrade (#934) Python 3.6 added a new `policy` attribute to `MIMEMultipart`. MIMEMultipart may be part of the cached object pickle of a datasource. Upgrading from an old version of python to 3.6+ will cause the datasource to be invalid after pickle load. This commit uses the upgrade framework to attempt to access the mime message and fail early (thus discarding the cache) if we cannot. Commit 78e89b03 should fix this issue more generally. --- cloudinit/sources/__init__.py | 18 + cloudinit/stages.py | 2 + .../integration_tests/assets/trusty_with_mime.pkl | 572 +++++++++++++++++++++ .../integration_tests/modules/test_persistence.py | 30 ++ 4 files changed, 622 insertions(+) create mode 100644 tests/integration_tests/assets/trusty_with_mime.pkl create mode 100644 tests/integration_tests/modules/test_persistence.py (limited to 'cloudinit') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a07c4b4f..9d25b0ee 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -75,6 +75,10 @@ NetworkConfigSource = namedtuple('NetworkConfigSource', _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES) +class DatasourceUnpickleUserDataError(Exception): + """Raised when userdata is unable to be unpickled due to python upgrades""" + + class DataSourceNotFoundException(Exception): pass @@ -239,6 +243,20 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): self.vendordata2 = None if not hasattr(self, 'vendordata2_raw'): self.vendordata2_raw = None + if hasattr(self, 'userdata') and self.userdata is not None: + # If userdata stores MIME data, on < python3.6 it will be + # missing the 'policy' attribute that exists on >=python3.6. + # Calling str() on the userdata will attempt to access this + # policy attribute. This will raise an exception, causing + # the pickle load to fail, so cloud-init will discard the cache + try: + str(self.userdata) + except AttributeError as e: + LOG.debug( + "Unable to unpickle datasource: %s." 
+ " Ignoring current cache.", e + ) + raise DatasourceUnpickleUserDataError() from e def __str__(self): return type_utils.obj_name(self) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 3688be2e..06e0d9b1 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -1070,6 +1070,8 @@ def _pkl_load(fname): return None try: return pickle.loads(pickle_contents) + except sources.DatasourceUnpickleUserDataError: + return None except Exception: util.logexc(LOG, "Failed loading pickled blob from %s", fname) return None diff --git a/tests/integration_tests/assets/trusty_with_mime.pkl b/tests/integration_tests/assets/trusty_with_mime.pkl new file mode 100644 index 00000000..a4089ecf --- /dev/null +++ b/tests/integration_tests/assets/trusty_with_mime.pkl @@ -0,0 +1,572 @@ +ccopy_reg +_reconstructor +p1 +(ccloudinit.sources.DataSourceNoCloud +DataSourceNoCloudNet +p2 +c__builtin__ +object +p3 +NtRp4 +(dp5 +S'paths' +p6 +g1 +(ccloudinit.helpers +Paths +p7 +g3 +NtRp8 +(dp9 +S'lookups' +p10 +(dp11 +S'cloud_config' +p12 +S'cloud-config.txt' +p13 +sS'userdata' +p14 +S'user-data.txt.i' +p15 +sS'vendordata' +p16 +S'vendor-data.txt.i' +p17 +sS'userdata_raw' +p18 +S'user-data.txt' +p19 +sS'boothooks' +p20 +g20 +sS'scripts' +p21 +g21 +sS'sem' +p22 +g22 +sS'data' +p23 +g23 +sS'vendor_scripts' +p24 +S'scripts/vendor' +p25 +sS'handlers' +p26 +g26 +sS'obj_pkl' +p27 +S'obj.pkl' +p28 +sS'vendordata_raw' +p29 +S'vendor-data.txt' +p30 +sS'vendor_cloud_config' +p31 +S'vendor-cloud-config.txt' +p32 +ssS'template_tpl' +p33 +S'/etc/cloud/templates/%s.tmpl' +p34 +sS'cfgs' +p35 +(dp36 +S'cloud_dir' +p37 +S'/var/lib/cloud/' +p38 +sS'templates_dir' +p39 +S'/etc/cloud/templates/' +p40 +sS'upstart_dir' +p41 +S'/etc/init/' +p42 +ssS'cloud_dir' +p43 +g38 +sS'datasource' +p44 +NsS'upstart_conf_d' +p45 +g42 +sS'boot_finished' +p46 +S'/var/lib/cloud/instance/boot-finished' +p47 +sS'instance_link' +p48 +S'/var/lib/cloud/instance' +p49 +sS'seed_dir' +p50 +S'/var/lib/cloud/seed' +p51 +sbsS'supported_seed_starts' +p52 +(S'http://' +p53 +S'https://' +p54 +S'ftp://' +p55 +tp56 +sS'sys_cfg' +p57 +(dp58 +S'output' +p59 +(dp60 +S'all' +p61 +S'| tee -a /var/log/cloud-init-output.log' +p62 +ssS'users' +p63 +(lp64 +S'default' +p65 +asS'def_log_file' +p66 +S'/var/log/cloud-init.log' +p67 +sS'cloud_final_modules' +p68 +(lp69 +S'rightscale_userdata' +p70 +aS'scripts-vendor' +p71 +aS'scripts-per-once' +p72 +aS'scripts-per-boot' +p73 +aS'scripts-per-instance' +p74 +aS'scripts-user' +p75 +aS'ssh-authkey-fingerprints' +p76 +aS'keys-to-console' +p77 +aS'phone-home' +p78 +aS'final-message' +p79 +aS'power-state-change' +p80 +asS'disable_root' +p81 +I01 +sS'syslog_fix_perms' +p82 +S'syslog:adm' +p83 +sS'log_cfgs' +p84 +(lp85 +(lp86 +S'[loggers]\nkeys=root,cloudinit\n\n[handlers]\nkeys=consoleHandler,cloudLogHandler\n\n[formatters]\nkeys=simpleFormatter,arg0Formatter\n\n[logger_root]\nlevel=DEBUG\nhandlers=consoleHandler,cloudLogHandler\n\n[logger_cloudinit]\nlevel=DEBUG\nqualname=cloudinit\nhandlers=\npropagate=1\n\n[handler_consoleHandler]\nclass=StreamHandler\nlevel=WARNING\nformatter=arg0Formatter\nargs=(sys.stderr,)\n\n[formatter_arg0Formatter]\nformat=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s\n\n[formatter_simpleFormatter]\nformat=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s\n' +p87 +aS'[handler_cloudLogHandler]\nclass=handlers.SysLogHandler\nlevel=DEBUG\nformatter=simpleFormatter\nargs=("/dev/log", handlers.SysLogHandler.LOG_USER)\n' +p88 +aa(lp89 +g87 
+aS"[handler_cloudLogHandler]\nclass=FileHandler\nlevel=DEBUG\nformatter=arg0Formatter\nargs=('/var/log/cloud-init.log',)\n" +p90 +aasS'cloud_init_modules' +p91 +(lp92 +S'migrator' +p93 +aS'seed_random' +p94 +aS'bootcmd' +p95 +aS'write-files' +p96 +aS'growpart' +p97 +aS'resizefs' +p98 +aS'set_hostname' +p99 +aS'update_hostname' +p100 +aS'update_etc_hosts' +p101 +aS'ca-certs' +p102 +aS'rsyslog' +p103 +aS'users-groups' +p104 +aS'ssh' +p105 +asS'preserve_hostname' +p106 +I00 +sS'_log' +p107 +(lp108 +g87 +ag90 +ag88 +asS'datasource_list' +p109 +(lp110 +S'NoCloud' +p111 +aS'ConfigDrive' +p112 +aS'OpenNebula' +p113 +aS'Azure' +p114 +aS'AltCloud' +p115 +aS'OVF' +p116 +aS'MAAS' +p117 +aS'GCE' +p118 +aS'OpenStack' +p119 +aS'CloudSigma' +p120 +aS'Ec2' +p121 +aS'CloudStack' +p122 +aS'SmartOS' +p123 +aS'None' +p124 +asS'vendor_data' +p125 +(dp126 +S'prefix' +p127 +(lp128 +sS'enabled' +p129 +I01 +ssS'cloud_config_modules' +p130 +(lp131 +S'emit_upstart' +p132 +aS'disk_setup' +p133 +aS'mounts' +p134 +aS'ssh-import-id' +p135 +aS'locale' +p136 +aS'set-passwords' +p137 +aS'grub-dpkg' +p138 +aS'apt-pipelining' +p139 +aS'apt-configure' +p140 +aS'package-update-upgrade-install' +p141 +aS'landscape' +p142 +aS'timezone' +p143 +aS'puppet' +p144 +aS'chef' +p145 +aS'salt-minion' +p146 +aS'mcollective' +p147 +aS'disable-ec2-metadata' +p148 +aS'runcmd' +p149 +aS'byobu' +p150 +assg14 +(iemail.mime.multipart +MIMEMultipart +p151 +(dp152 +S'_headers' +p153 +(lp154 +(S'Content-Type' +p155 +S'multipart/mixed; boundary="===============4291038100093149247=="' +tp156 +a(S'MIME-Version' +p157 +S'1.0' +p158 +tp159 +a(S'Number-Attachments' +p160 +S'1' +tp161 +asS'_payload' +p162 +(lp163 +(iemail.mime.base +MIMEBase +p164 +(dp165 +g153 +(lp166 +(g157 +g158 +tp167 +a(S'Content-Type' +p168 +S'text/x-not-multipart' +tp169 +a(S'Content-Disposition' +p170 +S'attachment; filename="part-001"' +tp171 +asg162 +S'' +sS'_charset' +p172 +NsS'_default_type' +p173 +S'text/plain' +p174 +sS'preamble' +p175 +NsS'defects' +p176 +(lp177 +sS'_unixfrom' +p178 +NsS'epilogue' +p179 +Nsbasg172 +Nsg173 +g174 +sg175 +Nsg176 +(lp180 +sg178 +Nsg179 +Nsbsg16 +S'#cloud-config\n{}\n\n' +p181 +sg18 +S'Content-Type: multipart/mixed; boundary="===============1378281702283945349=="\nMIME-Version: 1.0\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script1.sh"\n\nIyEvYmluL3NoCgplY2hvICdoaScgPiAvdmFyL3RtcC9oaQo=\n\n--===============1378281702283945349==\nContent-Type: text/x-shellscript; charset="utf-8"\nMIME-Version: 1.0\nContent-Transfer-Encoding: base64\nContent-Disposition: attachment; filename="script2.sh"\n\nIyEvYmluL2Jhc2gKCmVjaG8gJ2hpMicgPiAvdmFyL3RtcC9oaTIK\n\n--===============1378281702283945349==--\n\n#cloud-config\n# final_message: |\n# This is my final message!\n# $version\n# $timestamp\n# $datasource\n# $uptime\n# updates:\n# network:\n# when: [\'hotplug\']\n' +p182 +sg29 +NsS'dsmode' +p183 +S'net' +p184 +sS'seed' +p185 +S'/var/lib/cloud/seed/nocloud-net' +p186 +sS'cmdline_id' +p187 +S'ds=nocloud-net' +p188 +sS'ud_proc' +p189 +g1 +(ccloudinit.user_data +UserDataProcessor +p190 +g3 +NtRp191 +(dp192 +g6 +g8 +sS'ssl_details' +p193 +(dp194 +sbsg50 +g186 +sS'ds_cfg' +p195 +(dp196 +sS'distro' +p197 +g1 +(ccloudinit.distros.ubuntu +Distro +p198 +g3 +NtRp199 +(dp200 +S'osfamily' +p201 +S'debian' +p202 +sS'_paths' +p203 +g8 +sS'name' +p204 +S'ubuntu' +p205 +sS'_runner' +p206 +g1 +(ccloudinit.helpers +Runners +p207 +g3 
+NtRp208 +(dp209 +g6 +g8 +sS'sems' +p210 +(dp211 +sbsS'_cfg' +p212 +(dp213 +S'paths' +p214 +(dp215 +g37 +g38 +sg39 +g40 +sg41 +g42 +ssS'default_user' +p216 +(dp217 +S'shell' +p218 +S'/bin/bash' +p219 +sS'name' +p220 +S'ubuntu' +p221 +sS'sudo' +p222 +(lp223 +S'ALL=(ALL) NOPASSWD:ALL' +p224 +asS'lock_passwd' +p225 +I01 +sS'gecos' +p226 +S'Ubuntu' +p227 +sS'groups' +p228 +(lp229 +S'adm' +p230 +aS'audio' +p231 +aS'cdrom' +p232 +aS'dialout' +p233 +aS'dip' +p234 +aS'floppy' +p235 +aS'netdev' +p236 +aS'plugdev' +p237 +aS'sudo' +p238 +aS'video' +p239 +assS'package_mirrors' +p240 +(lp241 +(dp242 +S'arches' +p243 +(lp244 +S'i386' +p245 +aS'amd64' +p246 +asS'failsafe' +p247 +(dp248 +S'security' +p249 +S'http://security.ubuntu.com/ubuntu' +p250 +sS'primary' +p251 +S'http://archive.ubuntu.com/ubuntu' +p252 +ssS'search' +p253 +(dp254 +S'security' +p255 +(lp256 +sS'primary' +p257 +(lp258 +S'http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/' +p259 +aS'http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/' +p260 +aS'http://%(region)s.clouds.archive.ubuntu.com/ubuntu/' +p261 +assa(dp262 +S'arches' +p263 +(lp264 +S'armhf' +p265 +aS'armel' +p266 +aS'default' +p267 +asS'failsafe' +p268 +(dp269 +S'security' +p270 +S'http://ports.ubuntu.com/ubuntu-ports' +p271 +sS'primary' +p272 +S'http://ports.ubuntu.com/ubuntu-ports' +p273 +ssasS'ssh_svcname' +p274 +S'ssh' +p275 +ssbsS'metadata' +p276 +(dp277 +g183 +g184 +sS'local-hostname' +p278 +S'me' +p279 +sS'instance-id' +p280 +S'me' +p281 +ssb. \ No newline at end of file diff --git a/tests/integration_tests/modules/test_persistence.py b/tests/integration_tests/modules/test_persistence.py new file mode 100644 index 00000000..00fdeaea --- /dev/null +++ b/tests/integration_tests/modules/test_persistence.py @@ -0,0 +1,30 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Test the behavior of loading/discarding pickle data""" +from pathlib import Path + +import pytest + +from tests.integration_tests.instances import IntegrationInstance +from tests.integration_tests.util import ( + ASSETS_DIR, + verify_ordered_items_in_text, +) + + +PICKLE_PATH = Path('/var/lib/cloud/instance/obj.pkl') +TEST_PICKLE = ASSETS_DIR / 'trusty_with_mime.pkl' + + +@pytest.mark.lxd_container +def test_log_message_on_missing_version_file(client: IntegrationInstance): + client.push_file(TEST_PICKLE, PICKLE_PATH) + client.restart() + assert client.execute('cloud-init status --wait').ok + log = client.read_from_file('/var/log/cloud-init.log') + verify_ordered_items_in_text([ + "Unable to unpickle datasource: 'MIMEMultipart' object has no " + "attribute 'policy'. Ignoring current cache.", + 'no cache found', + 'Searching for local data source', + 'SUCCESS: found local data from DataSourceNoCloud' + ], log) -- cgit v1.2.3 From 184c836a16e9954a2cba11ae21f07923077ec904 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 19 Jul 2021 14:13:21 -0500 Subject: Initial hotplug support (#936) Adds a udev script which will invoke a hotplug hook script on all net add events. The script will write some udev arguments to a systemd FIFO socket (to ensure we have only instance of cloud-init running at a time), which is then read by a new service that calls a new 'cloud-init devel hotplug-hook' command to handle the new event. 
This hotplug-hook command will: - Fetch the pickled datasource - Verify that the hotplug event is supported/enabled - Update the metadata for the datasource - Ensure the hotplugged device exists within the datasource - Apply the config change on the datasource metadata - Bring up the new interface (or apply global network configuration) - Save the updated metadata back to the pickle cache Also adds some unrelated type annotations where helpful --- bash_completion/cloud-init | 5 +- cloudinit/cmd/devel/hotplug_hook.py | 236 ++++++++++++++++++++++++ cloudinit/cmd/devel/parser.py | 3 + cloudinit/distros/__init__.py | 11 +- cloudinit/event.py | 1 + cloudinit/net/activators.py | 174 +++++++++++++---- cloudinit/sources/DataSourceConfigDrive.py | 10 +- cloudinit/sources/DataSourceEc2.py | 7 + cloudinit/sources/DataSourceOpenStack.py | 11 +- cloudinit/sources/__init__.py | 3 +- cloudinit/stages.py | 4 +- doc/rtd/topics/cli.rst | 4 + doc/rtd/topics/events.rst | 10 +- packages/redhat/cloud-init.spec.in | 7 + setup.py | 2 + systemd/cloud-init-generator.tmpl | 0 systemd/cloud-init-hotplugd.service | 22 +++ systemd/cloud-init-hotplugd.socket | 13 ++ tests/integration_tests/modules/test_hotplug.py | 94 ++++++++++ tests/unittests/cmd/devel/test_hotplug_hook.py | 218 ++++++++++++++++++++++ tests/unittests/test_net_activators.py | 135 ++++++++++---- tools/hook-hotplug | 21 +++ udev/10-cloud-init-hook-hotplug.rules | 6 + 23 files changed, 906 insertions(+), 91 deletions(-) create mode 100644 cloudinit/cmd/devel/hotplug_hook.py mode change 100755 => 100644 systemd/cloud-init-generator.tmpl create mode 100644 systemd/cloud-init-hotplugd.service create mode 100644 systemd/cloud-init-hotplugd.socket create mode 100644 tests/integration_tests/modules/test_hotplug.py create mode 100644 tests/unittests/cmd/devel/test_hotplug_hook.py create mode 100755 tools/hook-hotplug create mode 100644 udev/10-cloud-init-hook-hotplug.rules (limited to 'cloudinit') diff --git a/bash_completion/cloud-init b/bash_completion/cloud-init index a9577e9d..b9f137b1 100644 --- a/bash_completion/cloud-init +++ b/bash_completion/cloud-init @@ -28,7 +28,7 @@ _cloudinit_complete() COMPREPLY=($(compgen -W "--help --tarfile --include-userdata" -- $cur_word)) ;; devel) - COMPREPLY=($(compgen -W "--help schema net-convert" -- $cur_word)) + COMPREPLY=($(compgen -W "--help hotplug-hook schema net-convert" -- $cur_word)) ;; dhclient-hook) COMPREPLY=($(compgen -W "--help up down" -- $cur_word)) @@ -64,6 +64,9 @@ _cloudinit_complete() --frequency) COMPREPLY=($(compgen -W "--help instance always once" -- $cur_word)) ;; + hotplug-hook) + COMPREPLY=($(compgen -W "--help" -- $cur_word)) + ;; net-convert) COMPREPLY=($(compgen -W "--help --network-data --kind --directory --output-kind" -- $cur_word)) ;; diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py new file mode 100644 index 00000000..0282f24a --- /dev/null +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -0,0 +1,236 @@ +# This file is part of cloud-init. See LICENSE file for license information.
+"""Handle reconfiguration on hotplug events""" +import abc +import argparse +import os +import time + +from cloudinit import log +from cloudinit import reporting +from cloudinit.event import EventScope, EventType +from cloudinit.net import activators, read_sys_net_safe +from cloudinit.net.network_state import parse_net_config_data +from cloudinit.reporting import events +from cloudinit.stages import Init +from cloudinit.sources import DataSource + + +LOG = log.getLogger(__name__) +NAME = 'hotplug-hook' + + +def get_parser(parser=None): + """Build or extend an arg parser for hotplug-hook utility. + + @param parser: Optional existing ArgumentParser instance representing the + subcommand which will be extended to support the args of this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser(prog=NAME, description=__doc__) + + parser.description = __doc__ + parser.add_argument("-d", "--devpath", required=True, + metavar="PATH", + help="sysfs path to hotplugged device") + parser.add_argument("-s", "--subsystem", required=True, + help="subsystem to act on", + choices=['net']) + parser.add_argument("-u", "--udevaction", required=True, + help="action to take", + choices=['add', 'remove']) + + return parser + + +class UeventHandler(abc.ABC): + def __init__(self, id, datasource, devpath, action, success_fn): + self.id = id + self.datasource = datasource # type: DataSource + self.devpath = devpath + self.action = action + self.success_fn = success_fn + + @abc.abstractmethod + def apply(self): + raise NotImplementedError() + + @property + @abc.abstractmethod + def config(self): + raise NotImplementedError() + + @abc.abstractmethod + def device_detected(self) -> bool: + raise NotImplementedError() + + def detect_hotplugged_device(self): + detect_presence = None + if self.action == 'add': + detect_presence = True + elif self.action == 'remove': + detect_presence = False + else: + raise ValueError('Unknown action: %s' % self.action) + + if detect_presence != self.device_detected(): + raise RuntimeError( + 'Failed to detect %s in updated metadata' % self.id) + + def success(self): + return self.success_fn() + + def update_metadata(self): + result = self.datasource.update_metadata_if_supported([ + EventType.HOTPLUG]) + if not result: + raise RuntimeError( + 'Datasource %s not updated for ' + 'event %s' % (self.datasource, EventType.HOTPLUG) + ) + return result + + +class NetHandler(UeventHandler): + def __init__(self, datasource, devpath, action, success_fn): + # convert devpath to mac address + id = read_sys_net_safe(os.path.basename(devpath), 'address') + super().__init__(id, datasource, devpath, action, success_fn) + + def apply(self): + self.datasource.distro.apply_network_config( + self.config, + bring_up=False, + ) + interface_name = os.path.basename(self.devpath) + activator = activators.select_activator() + if self.action == 'add': + if not activator.bring_up_interface(interface_name): + raise RuntimeError( + 'Failed to bring up device: {}'.format(self.devpath)) + elif self.action == 'remove': + if not activator.bring_down_interface(interface_name): + raise RuntimeError( + 'Failed to bring down device: {}'.format(self.devpath)) + + @property + def config(self): + return self.datasource.network_config + + def device_detected(self) -> bool: + netstate = parse_net_config_data(self.config) + found = [ + iface for iface in netstate.iter_interfaces() + if iface.get('mac_address') == self.id + ] + LOG.debug('Ifaces with ID=%s : 
%s', self.id, found)
+        return len(found) > 0
+
+
+SUBSYSTEM_PROPERTES_MAP = {
+    'net': (NetHandler, EventScope.NETWORK),
+}
+
+
+def handle_hotplug(
+    hotplug_init: Init, devpath, subsystem, udevaction
+):
+    handler_cls, event_scope = SUBSYSTEM_PROPERTES_MAP.get(
+        subsystem, (None, None)
+    )
+    if handler_cls is None:
+        raise Exception(
+            'hotplug-hook: cannot handle events for subsystem: {}'.format(
+                subsystem))
+
+    LOG.debug('Fetching datasource')
+    datasource = hotplug_init.fetch(existing="trust")
+
+    if not hotplug_init.update_event_enabled(
+        event_source_type=EventType.HOTPLUG,
+        scope=EventScope.NETWORK
+    ):
+        LOG.debug('hotplug not enabled for event of type %s', event_scope)
+        return
+
+    LOG.debug('Creating %s event handler', subsystem)
+    event_handler = handler_cls(
+        datasource=datasource,
+        devpath=devpath,
+        action=udevaction,
+        success_fn=hotplug_init._write_to_cache
+    )  # type: UeventHandler
+    wait_times = [1, 3, 5, 10, 30]
+    for attempt, wait in enumerate(wait_times):
+        LOG.debug(
+            'subsystem=%s update attempt %s/%s',
+            subsystem,
+            attempt,
+            len(wait_times)
+        )
+        try:
+            LOG.debug('Refreshing metadata')
+            event_handler.update_metadata()
+            LOG.debug('Detecting device in updated metadata')
+            event_handler.detect_hotplugged_device()
+            LOG.debug('Applying config change')
+            event_handler.apply()
+            LOG.debug('Updating cache')
+            event_handler.success()
+            break
+        except Exception as e:
+            LOG.debug('Exception while processing hotplug event. %s', e)
+            time.sleep(wait)
+            last_exception = e
+    else:
+        raise last_exception  # type: ignore
+
+
+def handle_args(name, args):
+    # Note that if an exception happens between now and when logging is
+    # setup, we'll only see it in the journal
+    hotplug_reporter = events.ReportEventStack(
+        name, __doc__, reporting_enabled=True
+    )
+
+    hotplug_init = Init(ds_deps=[], reporter=hotplug_reporter)
+    hotplug_init.read_cfg()
+
+    log.setupLogging(hotplug_init.cfg)
+    if 'reporting' in hotplug_init.cfg:
+        reporting.update_configuration(hotplug_init.cfg.get('reporting'))
+
+    # Logging isn't going to be setup until now
+    LOG.debug(
+        '%s called with the following arguments: {udevaction: %s, '
+        'subsystem: %s, devpath: %s}',
+        name, args.udevaction, args.subsystem, args.devpath
+    )
+
+    with hotplug_reporter:
+        try:
+            handle_hotplug(
+                hotplug_init=hotplug_init,
+                devpath=args.devpath,
+                subsystem=args.subsystem,
+                udevaction=args.udevaction,
+            )
+        except Exception:
+            LOG.exception('Received fatal exception handling hotplug!')
+            raise
+
+    LOG.debug('Exiting hotplug handler')
+    reporting.flush_events()
+
+
+if __name__ == '__main__':
+    args = get_parser().parse_args()
+    handle_args(NAME, args)
diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py
index 1a3c46a4..be304630 100644
--- a/cloudinit/cmd/devel/parser.py
+++ b/cloudinit/cmd/devel/parser.py
@@ -7,6 +7,7 @@ import argparse
 
 from cloudinit.config import schema
 
+from . import hotplug_hook
 from . import net_convert
 from . import render
 from . 
import make_mime
@@ -21,6 +22,8 @@ def get_parser(parser=None):
     subparsers.required = True
 
     subcmds = [
+        (hotplug_hook.NAME, hotplug_hook.__doc__,
+         hotplug_hook.get_parser, hotplug_hook.handle_args),
         ('schema', 'Validate cloud-config files for document schema',
          schema.get_parser, schema.handle_schema_args),
         (net_convert.NAME, net_convert.__doc__,
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 2caa8bc2..7bdf2197 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -206,8 +206,15 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
     def generate_fallback_config(self):
         return net.generate_fallback_config()
 
-    def apply_network_config(self, netconfig, bring_up=False):
-        # apply network config netconfig
+    def apply_network_config(self, netconfig, bring_up=False) -> bool:
+        """Apply the network config.
+
+        If bring_up is True, attempt to bring up the passed in devices. If
+        devices is None, attempt to bring up devices returned by
+        _write_network_config.
+
+        Returns True if any devices failed to come up, otherwise False.
+        """
         # This method is preferred to apply_network which only takes
         # a much less complete network config format (interfaces(5)).
         network_state = parse_net_config_data(netconfig)
diff --git a/cloudinit/event.py b/cloudinit/event.py
index 76a0afc6..53ad4c25 100644
--- a/cloudinit/event.py
+++ b/cloudinit/event.py
@@ -29,6 +29,7 @@ class EventType(Enum):
     BOOT = "boot"
     BOOT_NEW_INSTANCE = "boot-new-instance"
     BOOT_LEGACY = "boot-legacy"
+    HOTPLUG = 'hotplug'
 
     def __str__(self):  # pylint: disable=invalid-str-returned
         return self.value
diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py
index 34fee3bf..84aaafc9 100644
--- a/cloudinit/net/activators.py
+++ b/cloudinit/net/activators.py
@@ -15,31 +15,80 @@ from cloudinit.net.sysconfig import NM_CFG_FILE
 
 LOG = logging.getLogger(__name__)
 
 
+def _alter_interface(cmd, device_name) -> bool:
+    LOG.debug("Attempting command %s for device %s", cmd, device_name)
+    try:
+        (_out, err) = subp.subp(cmd)
+        if len(err):
+            LOG.warning("Running %s resulted in stderr output: %s",
+                        cmd, err)
+        return True
+    except subp.ProcessExecutionError:
+        util.logexc(LOG, "Running interface command %s failed", cmd)
+        return False
+
+
 class NetworkActivator(ABC):
     @staticmethod
     @abstractmethod
     def available() -> bool:
+        """Return True if activator is available, otherwise return False."""
        raise NotImplementedError()
 
     @staticmethod
     @abstractmethod
     def bring_up_interface(device_name: str) -> bool:
+        """Bring up interface.
+
+        Return True if successful, otherwise return False
+        """
+        raise NotImplementedError()
+
+    @staticmethod
+    @abstractmethod
+    def bring_down_interface(device_name: str) -> bool:
+        """Bring down interface.
+
+        Return True if successful, otherwise return False
+        """
        raise NotImplementedError()
 
     @classmethod
     def bring_up_interfaces(cls, device_names: Iterable[str]) -> bool:
-        all_succeeded = True
-        for device in device_names:
-            if not cls.bring_up_interface(device):
-                all_succeeded = False
-        return all_succeeded
+        """Bring up specified list of interfaces.
+
+        Return True if successful, otherwise return False
+        """
+        return all(cls.bring_up_interface(device) for device in device_names)
 
     @classmethod
     def bring_up_all_interfaces(cls, network_state: NetworkState) -> bool:
+        """Bring up all interfaces.
+
+        Return True if successful, otherwise return False
+        """
         return cls.bring_up_interfaces(
             [i['name'] for i in network_state.iter_interfaces()]
         )
 
+    @classmethod
+    def bring_down_interfaces(cls, device_names: Iterable[str]) -> bool:
+        """Bring down specified list of interfaces.
+
+        Return True if successful, otherwise return False
+        """
+        return all(cls.bring_down_interface(device) for device in device_names)
+
+    @classmethod
+    def bring_down_all_interfaces(cls, network_state: NetworkState) -> bool:
+        """Bring down all interfaces.
+
+        Return True if successful, otherwise return False
+        """
+        return cls.bring_down_interfaces(
+            [i['name'] for i in network_state.iter_interfaces()]
+        )
+
 
 class IfUpDownActivator(NetworkActivator):
     # Note that we're not overriding bring_up_interfaces to pass something
@@ -53,24 +102,27 @@ class IfUpDownActivator(NetworkActivator):
 
     @staticmethod
     def bring_up_interface(device_name: str) -> bool:
-        """Bring up interface using ifup."""
+        """Bring up interface using ifup.
+
+        Return True if successful, otherwise return False
+        """
         cmd = ['ifup', device_name]
-        LOG.debug("Attempting to run bring up interface %s using command %s",
-                  device_name, cmd)
-        try:
-            (_out, err) = subp.subp(cmd)
-            if len(err):
-                LOG.warning("Running %s resulted in stderr output: %s",
-                            cmd, err)
-            return True
-        except subp.ProcessExecutionError:
-            util.logexc(LOG, "Running interface command %s failed", cmd)
-            return False
+        return _alter_interface(cmd, device_name)
+
+    @staticmethod
+    def bring_down_interface(device_name: str) -> bool:
+        """Bring down interface using ifdown.
+
+        Return True if successful, otherwise return False
+        """
+        cmd = ['ifdown', device_name]
+        return _alter_interface(cmd, device_name)
 
 
 class NetworkManagerActivator(NetworkActivator):
     @staticmethod
     def available(target=None) -> bool:
+        """ Return true if network manager can be used on this system."""
         config_present = os.path.isfile(
             subp.target_path(target, path=NM_CFG_FILE)
         )
@@ -79,44 +131,86 @@ class NetworkManagerActivator(NetworkActivator):
 
     @staticmethod
     def bring_up_interface(device_name: str) -> bool:
-        try:
-            subp.subp(['nmcli', 'connection', 'up', device_name])
-        except subp.ProcessExecutionError:
-            util.logexc(LOG, "nmcli failed to bring up {}".format(device_name))
-            return False
-        return True
+        """Bring up interface using nmcli.
+
+        Return True if successful, otherwise return False
+        """
+        cmd = ['nmcli', 'connection', 'up', 'ifname', device_name]
+        return _alter_interface(cmd, device_name)
+
+    @staticmethod
+    def bring_down_interface(device_name: str) -> bool:
+        """Bring down interface using nmcli.
+
+        Return True if successful, otherwise return False
+        """
+        cmd = ['nmcli', 'connection', 'down', device_name]
+        return _alter_interface(cmd, device_name)
 
 
 class NetplanActivator(NetworkActivator):
+    NETPLAN_CMD = ['netplan', 'apply']
+
     @staticmethod
     def available(target=None) -> bool:
+        """ Return true if netplan can be used on this system."""
         return netplan_available(target=target)
 
-    @staticmethod
-    def _apply_netplan():
-        LOG.debug('Applying current netplan config')
-        try:
-            subp.subp(['netplan', 'apply'], capture=True)
-        except subp.ProcessExecutionError:
-            util.logexc(LOG, "netplan apply failed")
-            return False
-        return True
-
     @staticmethod
     def bring_up_interface(device_name: str) -> bool:
+        """Apply netplan config.
+
+        Return True if successful, otherwise return False
+        """
         LOG.debug("Calling 'netplan apply' rather than "
-                  "bringing up individual interfaces")
-        return NetplanActivator._apply_netplan()
+                  "altering individual interfaces")
+        return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
 
     @staticmethod
     def bring_up_interfaces(device_names: Iterable[str]) -> bool:
+        """Apply netplan config.
+
+        Return True if successful, otherwise return False
+        """
         LOG.debug("Calling 'netplan apply' rather than "
-                  "bringing up individual interfaces")
-        return NetplanActivator._apply_netplan()
+                  "altering individual interfaces")
+        return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
 
     @staticmethod
     def bring_up_all_interfaces(network_state: NetworkState) -> bool:
-        return NetplanActivator._apply_netplan()
+        """Apply netplan config.
+
+        Return True if successful, otherwise return False
+        """
+        return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+    @staticmethod
+    def bring_down_interface(device_name: str) -> bool:
+        """Apply netplan config.
+
+        Return True if successful, otherwise return False
+        """
+        LOG.debug("Calling 'netplan apply' rather than "
+                  "altering individual interfaces")
+        return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+    @staticmethod
+    def bring_down_interfaces(device_names: Iterable[str]) -> bool:
+        """Apply netplan config.
+
+        Return True if successful, otherwise return False
+        """
+        LOG.debug("Calling 'netplan apply' rather than "
+                  "altering individual interfaces")
+        return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
+
+    @staticmethod
+    def bring_down_all_interfaces(network_state: NetworkState) -> bool:
+        """Apply netplan config.
+
+        Return True if successful, otherwise return False
+        """
+        return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all')
 
 
 # This section is mostly copied and pasted from renderers.py. An abstract
@@ -153,4 +247,6 @@ def select_activator(priority=None, target=None) -> Type[NetworkActivator]:
         raise RuntimeError(
             "No available network activators found%s. Searched "
             "through list: %s" % (tmsg, priority))
-    return found[0]
+    selected = found[0]
+    LOG.debug('Using selected activator: %s', selected)
+    return selected
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 62756cf7..19c8d126 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -12,9 +12,8 @@ from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import subp
 from cloudinit import util
-
+from cloudinit.event import EventScope, EventType
 from cloudinit.net import eni
-
 from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform
 from cloudinit.sources.helpers import openstack
 
@@ -37,6 +36,13 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
 
     dsname = 'ConfigDrive'
 
+    supported_update_events = {EventScope.NETWORK: {
+        EventType.BOOT_NEW_INSTANCE,
+        EventType.BOOT,
+        EventType.BOOT_LEGACY,
+        EventType.HOTPLUG,
+    }}
+
     def __init__(self, sys_cfg, distro, paths):
         super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
         self.source = None
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 8a7f7c60..700437b0 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -76,6 +76,13 @@ class DataSourceEc2(sources.DataSource):
 
     # Whether we want to get network configuration from the metadata service.
     
perform_dhcp_setup = False + supported_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY, + EventType.HOTPLUG, + }} + def __init__(self, sys_cfg, distro, paths): super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 619a171e..a85b71d7 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -8,11 +8,11 @@ import time from cloudinit import dmi from cloudinit import log as logging -from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit import sources from cloudinit import url_helper from cloudinit import util - +from cloudinit.event import EventScope, EventType +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError from cloudinit.sources.helpers import openstack from cloudinit.sources import DataSourceOracle as oracle @@ -46,6 +46,13 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): # Whether we want to get network configuration from the metadata service. perform_dhcp_setup = False + supported_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + EventType.BOOT_LEGACY, + EventType.HOTPLUG + }} + def __init__(self, sys_cfg, distro, paths): super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths) self.metadata_address = None diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 9d25b0ee..bf6bf139 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -23,6 +23,7 @@ from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util from cloudinit.atomic_helper import write_json +from cloudinit.distros import Distro from cloudinit.event import EventScope, EventType from cloudinit.filters import launch_index from cloudinit.persistence import CloudInitPickleMixin @@ -215,7 +216,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): _ci_pkl_version = 1 - def __init__(self, sys_cfg, distro, paths, ud_proc=None): + def __init__(self, sys_cfg, distro: Distro, paths, ud_proc=None): self.sys_cfg = sys_cfg self.distro = distro self.paths = paths diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 06e0d9b1..bc164fa0 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -241,7 +241,7 @@ class Init(object): else: return (None, "cache invalid in datasource: %s" % ds) - def _get_data_source(self, existing): + def _get_data_source(self, existing) -> sources.DataSource: if self.datasource is not NULL_DATA_SOURCE: return self.datasource @@ -267,7 +267,7 @@ class Init(object): cfg_list, pkg_list, self.reporter) LOG.info("Loaded datasource %s - %s", dsname, ds) - self.datasource = ds + self.datasource = ds # type: sources.DataSource # Ensure we adjust our path members datasource # now that we have one (thus allowing ipath to be used) self._reset() diff --git a/doc/rtd/topics/cli.rst b/doc/rtd/topics/cli.rst index 0ff230b5..b6115ed6 100644 --- a/doc/rtd/topics/cli.rst +++ b/doc/rtd/topics/cli.rst @@ -119,6 +119,10 @@ Current subcommands: schema errors locally without the need for deployment. Schema validation is work in progress and supports a subset of cloud-config modules. + * ``hotplug-hook``: respond to newly added system devices by retrieving + updated system metadata and bringing up/down the corresponding device. 
+ This command is intended to be called via a systemd service and is + not considered user-accessible except for debugging purposes. .. _cli_features: diff --git a/doc/rtd/topics/events.rst b/doc/rtd/topics/events.rst index 463208cc..984e7577 100644 --- a/doc/rtd/topics/events.rst +++ b/doc/rtd/topics/events.rst @@ -20,11 +20,11 @@ event types: boot: once during Local stage, then again in Network stage. As this behavior was previously the default behavior, this option exists to prevent regressing such behavior. +- **HOTPLUG**: Dynamic add of a system device Future work will likely include infrastructure and support for the following events: -- **HOTPLUG**: Dynamic add of a system device - **METADATA_CHANGE**: An instance's metadata has change - **USER_REQUEST**: Directed request to update @@ -64,6 +64,12 @@ arbitrary values can be used. Each ``scope`` requires a ``when`` element to specify which events are to allowed to be handled. +Hotplug +======= +When the hotplug event is supported by the data source and configured in +user data, cloud-init will respond to the addition or removal of network +interfaces to the system. In addition to fetching and updating the system +metadata, cloud-init will also bring up/down the newly added interface. Examples ======== @@ -77,7 +83,7 @@ On every boot, apply network configuration found in the datasource. # apply network config on every boot updates: network: - when: ['boot'] + when: ['boot', 'hotplug'] .. _Cloud-init: https://launchpad.net/cloud-init .. vi: textwidth=78 diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 16138012..b930709b 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -119,6 +119,12 @@ version_pys=$(cd "$RPM_BUILD_ROOT" && find . -name version.py -type f) ( cd "$RPM_BUILD_ROOT" && sed -i "s,@@PACKAGED_VERSION@@,%{version}-%{release}," $version_pys ) +# patch hotplug /usr/libexec script path +hotplug_file=$(cd "$RPM_BUILD_ROOT" && find . 
-name 10-cloud-init-hook-hotplug.rules -type f)
+
+( cd "$RPM_BUILD_ROOT" &&
+  sed -i "s,/usr/lib,%{_libexecdir}," $hotplug_file )
+
 %clean
 rm -rf $RPM_BUILD_ROOT
 
@@ -172,6 +178,7 @@ fi
 %files
 
 /lib/udev/rules.d/66-azure-ephemeral.rules
+/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
 
 %if "%{init_system}" == "systemd"
 /usr/lib/systemd/system-generators/cloud-init-generator
diff --git a/setup.py b/setup.py
index dcbe0843..7fa03e63 100755
--- a/setup.py
+++ b/setup.py
@@ -128,6 +128,7 @@ INITSYS_FILES = {
     'systemd': [render_tmpl(f)
                 for f in (glob('systemd/*.tmpl') +
                           glob('systemd/*.service') +
+                          glob('systemd/*.socket') +
                           glob('systemd/*.target'))
                 if (is_f(f) and not is_generator(f))],
     'systemd.generators': [
@@ -249,6 +250,7 @@ data_files = [
     (ETC + '/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
     (ETC + '/cloud/templates', glob('templates/*')),
     (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify',
+                                    'tools/hook-hotplug',
                                     'tools/uncloud-init',
                                     'tools/write-ssh-key-fingerprints']),
     (USR + '/share/bash-completion/completions',
diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl
old mode 100755
new mode 100644
diff --git a/systemd/cloud-init-hotplugd.service b/systemd/cloud-init-hotplugd.service
new file mode 100644
index 00000000..b64632ef
--- /dev/null
+++ b/systemd/cloud-init-hotplugd.service
@@ -0,0 +1,22 @@
+# Paired with cloud-init-hotplugd.socket to read from the FIFO
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 10-cloud-init-hook-hotplug.rules.
+
+# On start, read args from the FIFO, process and provide structured arguments
+# to `cloud-init devel hotplug-hook` which will setup or teardown network
+# devices as configured by user-data.
+
+# Known bug with an enforcing SELinux policy: LP: #1936229
+# cloud-init-hotplugd.service will read args from file descriptor 3
+
+[Unit]
+Description=cloud-init hotplug hook daemon
+After=cloud-init-hotplugd.socket
+
+[Service]
+Type=simple
+ExecStart=/bin/bash -c 'read args <&3; echo "args=$args"; \
+                        exec /usr/bin/cloud-init devel hotplug-hook $args; \
+                        exit 0'
+SyslogIdentifier=cloud-init-hotplugd
+TimeoutStopSec=5
diff --git a/systemd/cloud-init-hotplugd.socket b/systemd/cloud-init-hotplugd.socket
new file mode 100644
index 00000000..aa093016
--- /dev/null
+++ b/systemd/cloud-init-hotplugd.socket
@@ -0,0 +1,13 @@
+# cloud-init-hotplugd.socket listens on the FIFO file
+# /run/cloud-init/hook-hotplug-cmd which is created during a udev network
+# add or remove event as processed by 10-cloud-init-hook-hotplug.rules.
+ +# Known bug with an enforcing SELinux policy: LP: #1936229 +[Unit] +Description=cloud-init hotplug hook socket + +[Socket] +ListenFIFO=/run/cloud-init/hook-hotplug-cmd + +[Install] +WantedBy=cloud-init.target diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py new file mode 100644 index 00000000..b683566f --- /dev/null +++ b/tests/integration_tests/modules/test_hotplug.py @@ -0,0 +1,94 @@ +import pytest +import time +import yaml +from collections import namedtuple + +from tests.integration_tests.instances import IntegrationInstance + +USER_DATA = """\ +#cloud-config +updates: + network: + when: ['hotplug'] +""" + +ip_addr = namedtuple('ip_addr', 'interface state ip4 ip6') + + +def _wait_till_hotplug_complete(client, expected_runs=1): + for _ in range(60): + log = client.read_from_file('/var/log/cloud-init.log') + if log.count('Exiting hotplug handler') == expected_runs: + return log + time.sleep(1) + raise Exception('Waiting for hotplug handler failed') + + +def _get_ip_addr(client): + ips = [] + lines = client.execute('ip --brief addr').split('\n') + for line in lines: + attributes = line.split() + interface, state = attributes[0], attributes[1] + ip4_cidr = attributes[2] if len(attributes) > 2 else None + ip6_cidr = attributes[3] if len(attributes) > 3 else None + ip4 = ip4_cidr.split('/')[0] if ip4_cidr else None + ip6 = ip6_cidr.split('/')[0] if ip6_cidr else None + ip = ip_addr(interface, state, ip4, ip6) + ips.append(ip) + return ips + + +@pytest.mark.openstack +@pytest.mark.user_data(USER_DATA) +def test_hotplug_add_remove(client: IntegrationInstance): + ips_before = _get_ip_addr(client) + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Exiting hotplug handler' not in log + + # Add new NIC + added_ip = client.instance.add_network_interface() + _wait_till_hotplug_complete(client) + ips_after_add = _get_ip_addr(client) + new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0] + + assert len(ips_after_add) == len(ips_before) + 1 + assert added_ip not in [ip.ip4 for ip in ips_before] + assert added_ip in [ip.ip4 for ip in ips_after_add] + assert new_addition.state == 'UP' + + netplan_cfg = client.read_from_file('/etc/netplan/50-cloud-init.yaml') + config = yaml.safe_load(netplan_cfg) + assert new_addition.interface in config['network']['ethernets'] + + # Remove new NIC + client.instance.remove_network_interface(added_ip) + _wait_till_hotplug_complete(client, expected_runs=2) + ips_after_remove = _get_ip_addr(client) + assert len(ips_after_remove) == len(ips_before) + assert added_ip not in [ip.ip4 for ip in ips_after_remove] + + netplan_cfg = client.read_from_file('/etc/netplan/50-cloud-init.yaml') + config = yaml.safe_load(netplan_cfg) + assert new_addition.interface not in config['network']['ethernets'] + + +@pytest.mark.openstack +def test_no_hotplug_in_userdata(client: IntegrationInstance): + ips_before = _get_ip_addr(client) + log = client.read_from_file('/var/log/cloud-init.log') + assert 'Exiting hotplug handler' not in log + + # Add new NIC + client.instance.add_network_interface() + _wait_till_hotplug_complete(client) + log = client.read_from_file('/var/log/cloud-init.log') + assert 'hotplug not enabled for event of type network' in log + + ips_after_add = _get_ip_addr(client) + if len(ips_after_add) == len(ips_before) + 1: + # We can see the device, but it should not have been brought up + new_ip = [ip for ip in ips_after_add if ip not in ips_before][0] + assert new_ip.state == 'DOWN' + 
else: + assert len(ips_after_add) == len(ips_before) diff --git a/tests/unittests/cmd/devel/test_hotplug_hook.py b/tests/unittests/cmd/devel/test_hotplug_hook.py new file mode 100644 index 00000000..63d2490e --- /dev/null +++ b/tests/unittests/cmd/devel/test_hotplug_hook.py @@ -0,0 +1,218 @@ +import pytest +from collections import namedtuple +from unittest import mock +from unittest.mock import call + +from cloudinit.cmd.devel.hotplug_hook import handle_hotplug +from cloudinit.distros import Distro +from cloudinit.event import EventType +from cloudinit.net.activators import NetworkActivator +from cloudinit.net.network_state import NetworkState +from cloudinit.sources import DataSource +from cloudinit.stages import Init + + +hotplug_args = namedtuple('hotplug_args', 'udevaction, subsystem, devpath') +FAKE_MAC = '11:22:33:44:55:66' + + +@pytest.yield_fixture +def mocks(): + m_init = mock.MagicMock(spec=Init) + m_distro = mock.MagicMock(spec=Distro) + m_datasource = mock.MagicMock(spec=DataSource) + m_datasource.distro = m_distro + m_init.datasource = m_datasource + m_init.fetch.return_value = m_datasource + + read_sys_net = mock.patch( + 'cloudinit.cmd.devel.hotplug_hook.read_sys_net_safe', + return_value=FAKE_MAC + ) + + m_network_state = mock.MagicMock(spec=NetworkState) + parse_net = mock.patch( + 'cloudinit.cmd.devel.hotplug_hook.parse_net_config_data', + return_value=m_network_state + ) + + m_activator = mock.MagicMock(spec=NetworkActivator) + select_activator = mock.patch( + 'cloudinit.cmd.devel.hotplug_hook.activators.select_activator', + return_value=m_activator + ) + + sleep = mock.patch('time.sleep') + + read_sys_net.start() + parse_net.start() + select_activator.start() + m_sleep = sleep.start() + + yield namedtuple('mocks', 'm_init m_network_state m_activator m_sleep')( + m_init=m_init, + m_network_state=m_network_state, + m_activator=m_activator, + m_sleep=m_sleep, + ) + + read_sys_net.stop() + parse_net.stop() + select_activator.stop() + sleep.stop() + + +class TestUnsupportedActions: + def test_unsupported_subsystem(self, mocks): + with pytest.raises( + Exception, + match='cannot handle events for subsystem: not_real' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + subsystem='not_real', + udevaction='add' + ) + + def test_unsupported_udevaction(self, mocks): + with pytest.raises(ValueError, match='Unknown action: not_real'): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='not_real', + subsystem='net' + ) + + +class TestHotplug: + def test_succcessful_add(self, mocks): + init = mocks.m_init + mocks.m_network_state.iter_interfaces.return_value = [{ + 'mac_address': FAKE_MAC, + }] + handle_hotplug( + hotplug_init=init, + devpath='/dev/fake', + udevaction='add', + subsystem='net' + ) + init.datasource.update_metadata_if_supported.assert_called_once_with([ + EventType.HOTPLUG + ]) + mocks.m_activator.bring_up_interface.assert_called_once_with('fake') + mocks.m_activator.bring_down_interface.assert_not_called() + init._write_to_cache.assert_called_once_with() + + def test_successful_remove(self, mocks): + init = mocks.m_init + mocks.m_network_state.iter_interfaces.return_value = [{}] + handle_hotplug( + hotplug_init=init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + init.datasource.update_metadata_if_supported.assert_called_once_with([ + EventType.HOTPLUG + ]) + mocks.m_activator.bring_down_interface.assert_called_once_with('fake') + mocks.m_activator.bring_up_interface.assert_not_called() + 
init._write_to_cache.assert_called_once_with() + + def test_update_event_disabled(self, mocks, caplog): + init = mocks.m_init + init.update_event_enabled.return_value = False + handle_hotplug( + hotplug_init=init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + assert 'hotplug not enabled for event of type' in caplog.text + init.datasource.update_metadata_if_supported.assert_not_called() + mocks.m_activator.bring_up_interface.assert_not_called() + mocks.m_activator.bring_down_interface.assert_not_called() + init._write_to_cache.assert_not_called() + + def test_update_metadata_failed(self, mocks): + mocks.m_init.datasource.update_metadata_if_supported.return_value = \ + False + with pytest.raises( + RuntimeError, match='Datasource .* not updated for event hotplug' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + + def test_detect_hotplugged_device_not_detected_on_add(self, mocks): + mocks.m_network_state.iter_interfaces.return_value = [{}] + with pytest.raises( + RuntimeError, + match='Failed to detect {} in updated metadata'.format(FAKE_MAC) + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='add', + subsystem='net' + ) + + def test_detect_hotplugged_device_detected_on_remove(self, mocks): + mocks.m_network_state.iter_interfaces.return_value = [{ + 'mac_address': FAKE_MAC, + }] + with pytest.raises( + RuntimeError, + match='Failed to detect .* in updated metadata' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + + def test_apply_failed_on_add(self, mocks): + mocks.m_network_state.iter_interfaces.return_value = [{ + 'mac_address': FAKE_MAC, + }] + mocks.m_activator.bring_up_interface.return_value = False + with pytest.raises( + RuntimeError, match='Failed to bring up device: /dev/fake' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='add', + subsystem='net' + ) + + def test_apply_failed_on_remove(self, mocks): + mocks.m_network_state.iter_interfaces.return_value = [{}] + mocks.m_activator.bring_down_interface.return_value = False + with pytest.raises( + RuntimeError, match='Failed to bring down device: /dev/fake' + ): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='remove', + subsystem='net' + ) + + def test_retry(self, mocks): + with pytest.raises(RuntimeError): + handle_hotplug( + hotplug_init=mocks.m_init, + devpath='/dev/fake', + udevaction='add', + subsystem='net' + ) + assert mocks.m_sleep.call_count == 5 + assert mocks.m_sleep.call_args_list == [ + call(1), call(3), call(5), call(10), call(30) + ] diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py index f11486ff..db825c35 100644 --- a/tests/unittests/test_net_activators.py +++ b/tests/unittests/test_net_activators.py @@ -35,32 +35,8 @@ ethernets: dhcp4: true """ -IF_UP_DOWN_AVAILABLE_CALLS = [ - (('ifquery',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), - (('ifup',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), - (('ifdown',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), -] - -IF_UP_DOWN_CALL_LIST = [ - ((['ifup', 'eth0'], ), {}), - ((['ifup', 'eth1'], ), {}), -] - -NETPLAN_AVAILABLE_CALLS = [ - (('netplan',), {'search': ['/usr/sbin', '/sbin'], 'target': None}), -] - NETPLAN_CALL_LIST = [ - ((['netplan', 'apply'], ), {'capture': True}), -] - -NETWORK_MANAGER_AVAILABLE_CALLS = [ - (('nmcli',), 
{'target': None}), -] - -NETWORK_MANAGER_CALL_LIST = [ - ((['nmcli', 'connection', 'up', 'eth0'], ), {}), - ((['nmcli', 'connection', 'up', 'eth1'], ), {}), + ((['netplan', 'apply'], ), {}), ] @@ -126,23 +102,54 @@ class TestSearchAndSelect: select_activator() -@pytest.mark.parametrize('activator, available_calls, expected_call_list', [ - (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS, IF_UP_DOWN_CALL_LIST), - (NetplanActivator, NETPLAN_AVAILABLE_CALLS, NETPLAN_CALL_LIST), - (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS, - NETWORK_MANAGER_CALL_LIST), +IF_UP_DOWN_AVAILABLE_CALLS = [ + (('ifquery',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), + (('ifup',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), + (('ifdown',), {'search': ['/sbin', '/usr/sbin'], 'target': None}), +] + +NETPLAN_AVAILABLE_CALLS = [ + (('netplan',), {'search': ['/usr/sbin', '/sbin'], 'target': None}), +] + +NETWORK_MANAGER_AVAILABLE_CALLS = [ + (('nmcli',), {'target': None}), +] + + +@pytest.mark.parametrize('activator, available_calls', [ + (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS), + (NetplanActivator, NETPLAN_AVAILABLE_CALLS), + (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS), ]) -class TestIfUpDownActivator: +class TestActivatorsAvailable: def test_available( - self, activator, available_calls, expected_call_list, available_mocks + self, activator, available_calls, available_mocks ): activator.available() assert available_mocks.m_which.call_args_list == available_calls + +IF_UP_DOWN_BRING_UP_CALL_LIST = [ + ((['ifup', 'eth0'], ), {}), + ((['ifup', 'eth1'], ), {}), +] + +NETWORK_MANAGER_BRING_UP_CALL_LIST = [ + ((['nmcli', 'connection', 'up', 'ifname', 'eth0'], ), {}), + ((['nmcli', 'connection', 'up', 'ifname', 'eth1'], ), {}), +] + + +@pytest.mark.parametrize('activator, expected_call_list', [ + (IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST), + (NetplanActivator, NETPLAN_CALL_LIST), + (NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST), +]) +class TestActivatorsBringUp: @patch('cloudinit.subp.subp', return_value=('', '')) def test_bring_up_interface( - self, m_subp, activator, available_calls, expected_call_list, - available_mocks + self, m_subp, activator, expected_call_list, available_mocks ): activator.bring_up_interface('eth0') assert len(m_subp.call_args_list) == 1 @@ -150,16 +157,14 @@ class TestIfUpDownActivator: @patch('cloudinit.subp.subp', return_value=('', '')) def test_bring_up_interfaces( - self, m_subp, activator, available_calls, expected_call_list, - available_mocks + self, m_subp, activator, expected_call_list, available_mocks ): activator.bring_up_interfaces(['eth0', 'eth1']) assert expected_call_list == m_subp.call_args_list @patch('cloudinit.subp.subp', return_value=('', '')) def test_bring_up_all_interfaces_v1( - self, m_subp, activator, available_calls, expected_call_list, - available_mocks + self, m_subp, activator, expected_call_list, available_mocks ): network_state = parse_net_config_data(load(V1_CONFIG)) activator.bring_up_all_interfaces(network_state) @@ -168,10 +173,60 @@ class TestIfUpDownActivator: @patch('cloudinit.subp.subp', return_value=('', '')) def test_bring_up_all_interfaces_v2( - self, m_subp, activator, available_calls, expected_call_list, - available_mocks + self, m_subp, activator, expected_call_list, available_mocks ): network_state = parse_net_config_data(load(V2_CONFIG)) activator.bring_up_all_interfaces(network_state) for call in m_subp.call_args_list: assert call in expected_call_list + + 
+IF_UP_DOWN_BRING_DOWN_CALL_LIST = [ + ((['ifdown', 'eth0'], ), {}), + ((['ifdown', 'eth1'], ), {}), +] + +NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [ + ((['nmcli', 'connection', 'down', 'eth0'], ), {}), + ((['nmcli', 'connection', 'down', 'eth1'], ), {}), +] + + +@pytest.mark.parametrize('activator, expected_call_list', [ + (IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST), + (NetplanActivator, NETPLAN_CALL_LIST), + (NetworkManagerActivator, NETWORK_MANAGER_BRING_DOWN_CALL_LIST), +]) +class TestActivatorsBringDown: + @patch('cloudinit.subp.subp', return_value=('', '')) + def test_bring_down_interface( + self, m_subp, activator, expected_call_list, available_mocks + ): + activator.bring_down_interface('eth0') + assert len(m_subp.call_args_list) == 1 + assert m_subp.call_args_list[0] == expected_call_list[0] + + @patch('cloudinit.subp.subp', return_value=('', '')) + def test_bring_down_interfaces( + self, m_subp, activator, expected_call_list, available_mocks + ): + activator.bring_down_interfaces(['eth0', 'eth1']) + assert expected_call_list == m_subp.call_args_list + + @patch('cloudinit.subp.subp', return_value=('', '')) + def test_bring_down_all_interfaces_v1( + self, m_subp, activator, expected_call_list, available_mocks + ): + network_state = parse_net_config_data(load(V1_CONFIG)) + activator.bring_down_all_interfaces(network_state) + for call in m_subp.call_args_list: + assert call in expected_call_list + + @patch('cloudinit.subp.subp', return_value=('', '')) + def test_bring_down_all_interfaces_v2( + self, m_subp, activator, expected_call_list, available_mocks + ): + network_state = parse_net_config_data(load(V2_CONFIG)) + activator.bring_down_all_interfaces(network_state) + for call in m_subp.call_args_list: + assert call in expected_call_list diff --git a/tools/hook-hotplug b/tools/hook-hotplug new file mode 100755 index 00000000..34e95929 --- /dev/null +++ b/tools/hook-hotplug @@ -0,0 +1,21 @@ +#!/bin/bash +# This file is part of cloud-init. See LICENSE file for license information. + +# This script checks if cloud-init has hotplug hooked and if +# cloud-init has finished; if so invoke cloud-init hotplug-hook + +is_finished() { + [ -e /run/cloud-init/result.json ] +} + +if is_finished; then + # open cloud-init's hotplug-hook fifo rw + exec 3<>/run/cloud-init/hook-hotplug-cmd + env_params=( + --devpath="${DEVPATH}" + --subsystem="${SUBSYSTEM}" + --udevaction="${ACTION}" + ) + # write params to cloud-init's hotplug-hook fifo + echo "${env_params[@]}" >&3 +fi diff --git a/udev/10-cloud-init-hook-hotplug.rules b/udev/10-cloud-init-hook-hotplug.rules new file mode 100644 index 00000000..2e382679 --- /dev/null +++ b/udev/10-cloud-init-hook-hotplug.rules @@ -0,0 +1,6 @@ +# This file is part of cloud-init. See LICENSE file for license information. +# Handle device adds only +ACTION!="add|remove", GOTO="cloudinit_end" +LABEL="cloudinit_hook" +SUBSYSTEM=="net|block", RUN+="/usr/lib/cloud-init/hook-hotplug" +LABEL="cloudinit_end" -- cgit v1.2.3 From a984ee78b745b157b4b023a1786bfbd3b2002b88 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Mon, 19 Jul 2021 17:45:02 -0400 Subject: Azure: mount default provisioning iso before try device listing (#870) With a few exceptions, Azure VM deployments receive provisioning metadata through the provisioning iso presented as a cdrom device (/dev/sr0). The existing code attempts to find this device by calling blkid to find all devices that have either type iso9660 or udf. This can be very expensive if the VM has a lot of disks. 
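For illustration, the pre-change enumeration boils down to the sketch
below (a simplified standalone rendering, not the patch itself: the
plain subprocess module stands in for cloud-init's util.find_devs_with
and subp wrappers, which pass blkid the same -tTYPE=<fstype> and
-odevice options):

    import subprocess

    def list_possible_azure_ds_devs():
        # Probe every attached block device for each filesystem type;
        # cost grows with the number of disks, even though the
        # provisioning iso is almost always presented at /dev/sr0.
        devlist = []
        for fstype in ('iso9660', 'udf'):
            out = subprocess.run(
                ['blkid', '-tTYPE=%s' % fstype, '-odevice'],
                capture_output=True, text=True, check=False,
            ).stdout
            devlist.extend(line for line in out.splitlines() if line)
        devlist.sort(reverse=True)
        return devlist
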
This commit will attempt to mount the default iso location first and only tries to use blkid to locate the iso location if the default mounting location fails --- cloudinit/sources/DataSourceAzure.py | 246 +++++++++++++------------- tests/unittests/test_datasource/test_azure.py | 52 ++++-- 2 files changed, 160 insertions(+), 138 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index dcdf9f8f..2e7bfbe3 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -76,7 +76,7 @@ REPROVISION_NIC_ATTACH_MARKER_FILE = "/var/lib/cloud/data/wait_for_nic_attach" REPROVISION_NIC_DETACHED_MARKER_FILE = "/var/lib/cloud/data/nic_detached" REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready" AGENT_SEED_DIR = '/var/lib/waagent' - +DEFAULT_PROVISIONING_ISO_DEV = '/dev/sr0' # In the event where the IMDS primary server is not # available, it takes 1s to fallback to the secondary one @@ -428,148 +428,142 @@ class DataSourceAzure(sources.DataSource): # it determines the value of ret. More specifically, the first one in # the candidate list determines the path to take in order to get the # metadata we need. - candidates = [self.seed_dir] + reprovision = False + ovf_is_accessible = True + reprovision_after_nic_attach = False + metadata_source = None + ret = None if os.path.isfile(REPROVISION_MARKER_FILE): - candidates.insert(0, "IMDS") + reprovision = True + metadata_source = "IMDS" report_diagnostic_event("Reprovision marker file already present " "before crawling Azure metadata: %s" % REPROVISION_MARKER_FILE, logger_func=LOG.debug) elif os.path.isfile(REPROVISION_NIC_ATTACH_MARKER_FILE): - candidates.insert(0, "NIC_ATTACH_MARKER_PRESENT") + reprovision_after_nic_attach = True + metadata_source = "NIC_ATTACH_MARKER_PRESENT" report_diagnostic_event("Reprovision nic attach marker file " "already present before crawling Azure " "metadata: %s" % REPROVISION_NIC_ATTACH_MARKER_FILE, logger_func=LOG.debug) - candidates.extend(list_possible_azure_ds_devs()) - if ddir: - candidates.append(ddir) - - found = None - reprovision = False - ovf_is_accessible = True - reprovision_after_nic_attach = False - for cdev in candidates: - try: - LOG.debug("cdev: %s", cdev) - if cdev == "IMDS": - ret = None - reprovision = True - elif cdev == "NIC_ATTACH_MARKER_PRESENT": - ret = None - reprovision_after_nic_attach = True - elif cdev.startswith("/dev/"): - if util.is_FreeBSD(): - ret = util.mount_cb(cdev, load_azure_ds_dir, - mtype="udf") + else: + for src in list_possible_azure_ds(self.seed_dir, ddir): + try: + if src.startswith("/dev/"): + if util.is_FreeBSD(): + ret = util.mount_cb(src, load_azure_ds_dir, + mtype="udf") + else: + ret = util.mount_cb(src, load_azure_ds_dir) + # save the device for ejection later + self.iso_dev = src + ovf_is_accessible = True else: - ret = util.mount_cb(cdev, load_azure_ds_dir) - else: - ret = load_azure_ds_dir(cdev) - - except NonAzureDataSource: - report_diagnostic_event( - "Did not find Azure data source in %s" % cdev, - logger_func=LOG.debug) - continue - except BrokenAzureDataSource as exc: - msg = 'BrokenAzureDataSource: %s' % exc - report_diagnostic_event(msg, logger_func=LOG.error) - raise sources.InvalidMetaDataException(msg) - except util.MountFailedError: - report_diagnostic_event( - '%s was not mountable' % cdev, logger_func=LOG.debug) - cdev = 'IMDS' - ovf_is_accessible = False - empty_md = {'local-hostname': ''} - empty_cfg = dict( - system_info=dict( - 
default_user=dict( - name='' + ret = load_azure_ds_dir(src) + metadata_source = src + break + except NonAzureDataSource: + report_diagnostic_event( + "Did not find Azure data source in %s" % src, + logger_func=LOG.debug) + continue + except util.MountFailedError: + report_diagnostic_event( + '%s was not mountable' % src, + logger_func=LOG.debug) + ovf_is_accessible = False + empty_md = {'local-hostname': ''} + empty_cfg = dict( + system_info=dict( + default_user=dict( + name='' + ) ) ) - ) - ret = (empty_md, '', empty_cfg, {}) - - report_diagnostic_event("Found provisioning metadata in %s" % cdev, - logger_func=LOG.debug) - - # save the iso device for ejection before reporting ready - if cdev.startswith("/dev"): - self.iso_dev = cdev - - perform_reprovision = reprovision or self._should_reprovision(ret) - perform_reprovision_after_nic_attach = ( - reprovision_after_nic_attach or - self._should_reprovision_after_nic_attach(ret)) - - if perform_reprovision or perform_reprovision_after_nic_attach: - if util.is_FreeBSD(): - msg = "Free BSD is not supported for PPS VMs" + ret = (empty_md, '', empty_cfg, {}) + metadata_source = 'IMDS' + continue + except BrokenAzureDataSource as exc: + msg = 'BrokenAzureDataSource: %s' % exc report_diagnostic_event(msg, logger_func=LOG.error) raise sources.InvalidMetaDataException(msg) - if perform_reprovision_after_nic_attach: - self._wait_for_all_nics_ready() - ret = self._reprovision() - imds_md = self.get_imds_data_with_api_fallback( - self.fallback_interface, - retries=10 - ) - if not imds_md and not ovf_is_accessible: - msg = 'No OVF or IMDS available' - report_diagnostic_event(msg) + report_diagnostic_event( + "Found provisioning metadata in %s" % metadata_source, + logger_func=LOG.debug) + + perform_reprovision = reprovision or self._should_reprovision(ret) + perform_reprovision_after_nic_attach = ( + reprovision_after_nic_attach or + self._should_reprovision_after_nic_attach(ret)) + + if perform_reprovision or perform_reprovision_after_nic_attach: + if util.is_FreeBSD(): + msg = "Free BSD is not supported for PPS VMs" + report_diagnostic_event(msg, logger_func=LOG.error) raise sources.InvalidMetaDataException(msg) - (md, userdata_raw, cfg, files) = ret - self.seed = cdev - crawled_data.update({ - 'cfg': cfg, - 'files': files, - 'metadata': util.mergemanydict( - [md, {'imds': imds_md}]), - 'userdata_raw': userdata_raw}) - imds_username = _username_from_imds(imds_md) - imds_hostname = _hostname_from_imds(imds_md) - imds_disable_password = _disable_password_from_imds(imds_md) - if imds_username: - LOG.debug('Username retrieved from IMDS: %s', imds_username) - cfg['system_info']['default_user']['name'] = imds_username - if imds_hostname: - LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname) - crawled_data['metadata']['local-hostname'] = imds_hostname - if imds_disable_password: - LOG.debug( - 'Disable password retrieved from IMDS: %s', - imds_disable_password - ) - crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501 - - # only use userdata from imds if OVF did not provide custom data - # userdata provided by IMDS is always base64 encoded - if not userdata_raw: - imds_userdata = _userdata_from_imds(imds_md) - if imds_userdata: - LOG.debug("Retrieved userdata from IMDS") - try: - crawled_data['userdata_raw'] = base64.b64decode( - ''.join(imds_userdata.split())) - except Exception: - report_diagnostic_event( - "Bad userdata in IMDS", - logger_func=LOG.warning) - found = cdev + if perform_reprovision_after_nic_attach: + 
self._wait_for_all_nics_ready() + ret = self._reprovision() - report_diagnostic_event( - 'found datasource in %s' % cdev, logger_func=LOG.debug) - break + imds_md = self.get_imds_data_with_api_fallback( + self.fallback_interface, + retries=10 + ) + if not imds_md and not ovf_is_accessible: + msg = 'No OVF or IMDS available' + report_diagnostic_event(msg) + raise sources.InvalidMetaDataException(msg) + (md, userdata_raw, cfg, files) = ret + self.seed = metadata_source + crawled_data.update({ + 'cfg': cfg, + 'files': files, + 'metadata': util.mergemanydict( + [md, {'imds': imds_md}]), + 'userdata_raw': userdata_raw}) + imds_username = _username_from_imds(imds_md) + imds_hostname = _hostname_from_imds(imds_md) + imds_disable_password = _disable_password_from_imds(imds_md) + if imds_username: + LOG.debug('Username retrieved from IMDS: %s', imds_username) + cfg['system_info']['default_user']['name'] = imds_username + if imds_hostname: + LOG.debug('Hostname retrieved from IMDS: %s', imds_hostname) + crawled_data['metadata']['local-hostname'] = imds_hostname + if imds_disable_password: + LOG.debug( + 'Disable password retrieved from IMDS: %s', + imds_disable_password + ) + crawled_data['metadata']['disable_password'] = imds_disable_password # noqa: E501 + + # only use userdata from imds if OVF did not provide custom data + # userdata provided by IMDS is always base64 encoded + if not userdata_raw: + imds_userdata = _userdata_from_imds(imds_md) + if imds_userdata: + LOG.debug("Retrieved userdata from IMDS") + try: + crawled_data['userdata_raw'] = base64.b64decode( + ''.join(imds_userdata.split())) + except Exception: + report_diagnostic_event( + "Bad userdata in IMDS", + logger_func=LOG.warning) - if not found: + if not metadata_source: msg = 'No Azure metadata found' report_diagnostic_event(msg, logger_func=LOG.error) raise sources.InvalidMetaDataException(msg) + else: + report_diagnostic_event( + 'found datasource in %s' % metadata_source, + logger_func=LOG.debug) - if found == ddir: + if metadata_source == ddir: report_diagnostic_event( "using files cached in %s" % ddir, logger_func=LOG.debug) @@ -2084,18 +2078,18 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE): @azure_ds_telemetry_reporter -def list_possible_azure_ds_devs(): - devlist = [] +def list_possible_azure_ds(seed, cache_dir): + yield seed + yield DEFAULT_PROVISIONING_ISO_DEV if util.is_FreeBSD(): cdrom_dev = "/dev/cd0" if _check_freebsd_cdrom(cdrom_dev): - return [cdrom_dev] + yield cdrom_dev else: for fstype in ("iso9660", "udf"): - devlist.extend(util.find_devs_with("TYPE=%s" % fstype)) - - devlist.sort(reverse=True) - return devlist + yield from util.find_devs_with("TYPE=%s" % fstype) + if cache_dir: + yield cache_dir @azure_ds_telemetry_reporter diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 54e06119..3bf8fdb2 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -635,15 +635,20 @@ scbus-1 on xpt0 bus 0 def _get_ds(self, data, agent_command=None, distro='ubuntu', apply_network=None, instance_id=None): - def dsdevs(): - return data.get('dsdevs', []) - def _wait_for_files(flist, _maxwait=None, _naplen=None): data['waited'] = flist return [] + def _load_possible_azure_ds(seed_dir, cache_dir): + yield seed_dir + yield dsaz.DEFAULT_PROVISIONING_ISO_DEV + yield from data.get('dsdevs', []) + if cache_dir: + yield cache_dir + + seed_dir = os.path.join(self.paths.seed_dir, "azure") if data.get('ovfcontent') 
is not None: - populate_dir(os.path.join(self.paths.seed_dir, "azure"), + populate_dir(seed_dir, {'ovf-env.xml': data['ovfcontent']}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d @@ -654,6 +659,8 @@ scbus-1 on xpt0 bus 0 self.m_report_failure_to_fabric = mock.MagicMock(autospec=True) self.m_ephemeral_dhcpv4 = mock.MagicMock() self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock() + self.m_list_possible_azure_ds = mock.MagicMock( + side_effect=_load_possible_azure_ds) if instance_id: self.instance_id = instance_id @@ -667,7 +674,8 @@ scbus-1 on xpt0 bus 0 return '7783-7084-3265-9085-8269-3286-77' self.apply_patches([ - (dsaz, 'list_possible_azure_ds_devs', dsdevs), + (dsaz, 'list_possible_azure_ds', + self.m_list_possible_azure_ds), (dsaz, 'perform_hostname_bounce', mock.MagicMock()), (dsaz, 'get_hostname', mock.MagicMock()), (dsaz, 'set_hostname', mock.MagicMock()), @@ -844,9 +852,14 @@ scbus-1 on xpt0 bus 0 """When a device path is used, present that in subplatform.""" data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']} dsrc = self._get_ds(data) + # DSAzure will attempt to mount /dev/sr0 first, which should + # fail with mount error since the list of devices doesn't have + # /dev/sr0 with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb: - m_mount_cb.return_value = ( - {'local-hostname': 'me'}, 'ud', {'cfg': ''}, {}) + m_mount_cb.side_effect = [ + MountFailedError("fail"), + ({'local-hostname': 'me'}, 'ud', {'cfg': ''}, {}) + ] self.assertTrue(dsrc.get_data()) self.assertEqual(dsrc.userdata_raw, 'ud') self.assertEqual(dsrc.metadata['local-hostname'], 'me') @@ -1608,12 +1621,19 @@ scbus-1 on xpt0 bus 0 @mock.patch(MOCKPATH + 'util.is_FreeBSD') @mock.patch(MOCKPATH + '_check_freebsd_cdrom') - def test_list_possible_azure_ds_devs(self, m_check_fbsd_cdrom, - m_is_FreeBSD): + def test_list_possible_azure_ds(self, m_check_fbsd_cdrom, + m_is_FreeBSD): """On FreeBSD, possible devs should show /dev/cd0.""" m_is_FreeBSD.return_value = True m_check_fbsd_cdrom.return_value = True - self.assertEqual(dsaz.list_possible_azure_ds_devs(), ['/dev/cd0']) + possible_ds = [] + for src in dsaz.list_possible_azure_ds( + "seed_dir", "cache_dir"): + possible_ds.append(src) + self.assertEqual(possible_ds, ["seed_dir", + dsaz.DEFAULT_PROVISIONING_ISO_DEV, + "/dev/cd0", + "cache_dir"]) self.assertEqual( [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) @@ -1967,11 +1987,19 @@ class TestAzureBounce(CiTestCase): with_logs = True def mock_out_azure_moving_parts(self): + + def _load_possible_azure_ds(seed_dir, cache_dir): + yield seed_dir + yield dsaz.DEFAULT_PROVISIONING_ISO_DEV + if cache_dir: + yield cache_dir + self.patches.enter_context( mock.patch.object(dsaz.util, 'wait_for_files')) self.patches.enter_context( - mock.patch.object(dsaz, 'list_possible_azure_ds_devs', - mock.MagicMock(return_value=[]))) + mock.patch.object( + dsaz, 'list_possible_azure_ds', + mock.MagicMock(side_effect=_load_possible_azure_ds))) self.patches.enter_context( mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) -- cgit v1.2.3 From ec6afadbf0f0f77d5b58dccd70df77da89c2c91d Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Tue, 20 Jul 2021 16:58:21 +0200 Subject: Update pylint to v2.9.3 and fix the new issues it spots (#946) In CI run against pylint 2.9.3 and fix occurrences of: - W0237 (arguments-renamed) - W0402 (deprecated-module) The W0402 deprecated-module was about module `imp`: cloudinit/patcher.py:9: [W0402(deprecated-module), ] Uses of a deprecated module 'imp' The imp 
module is deprecated and replaced by importlib, which according to the
documentation has no replacement for acquire_lock() and release_lock(),
which are the only reason why `imp` is imported. Nothing about the code
using this lock actually requires it. Let's remove the locking code and
the import altogether.

Dropping the locking makes patcher.patch() an empty wrapper around
_patch_logging(). Rename _patch_logging() to patch_logging() and call
it directly instead. Drop patch().
---
 cloudinit/cmd/main.py                              |  2 +-
 cloudinit/distros/__init__.py                      |  2 +-
 cloudinit/distros/alpine.py                        |  8 ++++----
 cloudinit/distros/arch.py                          |  8 ++++----
 cloudinit/distros/debian.py                        |  8 ++++----
 cloudinit/distros/gentoo.py                        |  8 ++++----
 cloudinit/distros/opensuse.py                      | 10 +++++-----
 cloudinit/distros/photon.py                        |  6 +++---
 cloudinit/distros/rhel.py                          |  8 ++++----
 cloudinit/patcher.py                               | 11 +----------
 tests/unittests/test_distros/test_create_users.py  |  2 +-
 tox.ini                                            |  2 +-
 12 files changed, 33 insertions(+), 42 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 21213a4a..1de1de99 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -19,7 +19,7 @@ import time
 import traceback
 
 from cloudinit import patcher
-patcher.patch()  # noqa
+patcher.patch_logging()
 
 from cloudinit import log as logging
 from cloudinit import netinfo
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 7bdf2197..40c4f2ac 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -147,7 +147,7 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta):
         return uses_systemd()
 
     @abc.abstractmethod
-    def package_command(self, cmd, args=None, pkgs=None):
+    def package_command(self, command, args=None, pkgs=None):
         raise NotImplementedError()
 
     @abc.abstractmethod
diff --git a/cloudinit/distros/alpine.py b/cloudinit/distros/alpine.py
index e4bed5a2..73b68baf 100644
--- a/cloudinit/distros/alpine.py
+++ b/cloudinit/distros/alpine.py
@@ -73,18 +73,18 @@ class Distro(distros.Distro):
         self.update_package_sources()
         self.package_command('add', pkgs=pkglist)
 
-    def _write_hostname(self, your_hostname, out_fn):
+    def _write_hostname(self, hostname, filename):
         conf = None
         try:
             # Try to update the previous one
             # so lets see if we can read it first.
-            conf = self._read_hostname_conf(out_fn)
+            conf = self._read_hostname_conf(filename)
         except IOError:
             pass
         if not conf:
             conf = HostnameConf('')
-        conf.set_hostname(your_hostname)
-        util.write_file(out_fn, str(conf), 0o644)
+        conf.set_hostname(hostname)
+        util.write_file(filename, str(conf), 0o644)
 
     def _read_system_hostname(self):
         sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index c9acb11f..3c5bbb38 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -101,18 +101,18 @@ class Distro(distros.Distro):
             util.logexc(LOG, "Running interface command %s failed", cmd)
             return False
 
-    def _write_hostname(self, your_hostname, out_fn):
+    def _write_hostname(self, hostname, filename):
         conf = None
         try:
             # Try to update the previous one
             # so lets see if we can read it first.
- conf = self._read_hostname_conf(out_fn) + conf = self._read_hostname_conf(filename) except IOError: pass if not conf: conf = HostnameConf('') - conf.set_hostname(your_hostname) - util.write_file(out_fn, str(conf), omode="w", mode=0o644) + conf.set_hostname(hostname) + util.write_file(filename, str(conf), omode="w", mode=0o644) def _read_system_hostname(self): sys_hostname = self._read_hostname(self.hostname_conf_fn) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 089e0c3e..f2b4dfc9 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -115,18 +115,18 @@ class Distro(distros.Distro): _maybe_remove_legacy_eth0() return super()._write_network_state(network_state) - def _write_hostname(self, your_hostname, out_fn): + def _write_hostname(self, hostname, filename): conf = None try: # Try to update the previous one # so lets see if we can read it first. - conf = self._read_hostname_conf(out_fn) + conf = self._read_hostname_conf(filename) except IOError: pass if not conf: conf = HostnameConf('') - conf.set_hostname(your_hostname) - util.write_file(out_fn, str(conf), 0o644) + conf.set_hostname(hostname) + util.write_file(filename, str(conf), 0o644) def _read_system_hostname(self): sys_hostname = self._read_hostname(self.hostname_conf_fn) diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py index 68c03e7f..1be76dc8 100644 --- a/cloudinit/distros/gentoo.py +++ b/cloudinit/distros/gentoo.py @@ -149,12 +149,12 @@ class Distro(distros.Distro): else: return distros.Distro._bring_up_interfaces(self, device_names) - def _write_hostname(self, your_hostname, out_fn): + def _write_hostname(self, hostname, filename): conf = None try: # Try to update the previous one # so lets see if we can read it first. - conf = self._read_hostname_conf(out_fn) + conf = self._read_hostname_conf(filename) except IOError: pass if not conf: @@ -163,8 +163,8 @@ class Distro(distros.Distro): # Many distro's format is the hostname by itself, and that is the # way HostnameConf works but gentoo expects it to be in # hostname="the-actual-hostname" - conf.set_hostname('hostname="%s"' % your_hostname) - util.write_file(out_fn, str(conf), 0o644) + conf.set_hostname('hostname="%s"' % hostname) + util.write_file(filename, str(conf), 0o644) def _read_system_hostname(self): sys_hostname = self._read_hostname(self.hostname_conf_fn) diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index b4193ac2..2a7497cc 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -150,9 +150,9 @@ class Distro(distros.Distro): host_fn = self.hostname_conf_fn return (host_fn, self._read_hostname(host_fn)) - def _write_hostname(self, hostname, out_fn): - if self.uses_systemd() and out_fn.endswith('/previous-hostname'): - util.write_file(out_fn, hostname) + def _write_hostname(self, hostname, filename): + if self.uses_systemd() and filename.endswith('/previous-hostname'): + util.write_file(filename, hostname) elif self.uses_systemd(): subp.subp(['hostnamectl', 'set-hostname', str(hostname)]) else: @@ -160,13 +160,13 @@ class Distro(distros.Distro): try: # Try to update the previous one # so lets see if we can read it first. 
- conf = self._read_hostname_conf(out_fn) + conf = self._read_hostname_conf(filename) except IOError: pass if not conf: conf = HostnameConf('') conf.set_hostname(hostname) - util.write_file(out_fn, str(conf), 0o644) + util.write_file(filename, str(conf), 0o644) @property def preferred_ntp_clients(self): diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py index 0ced7b5f..3ef5dd40 100644 --- a/cloudinit/distros/photon.py +++ b/cloudinit/distros/photon.py @@ -83,19 +83,19 @@ class Distro(distros.Distro): ret, _out, _err = self.exec_cmd(cmd) return ret - def _write_hostname(self, hostname, out_fn): + def _write_hostname(self, hostname, filename): conf = None try: # Try to update the previous one # Let's see if we can read it first. - conf = HostnameConf(util.load_file(out_fn)) + conf = HostnameConf(util.load_file(filename)) conf.parse() except IOError: pass if not conf: conf = HostnameConf('') conf.set_hostname(hostname) - util.write_file(out_fn, str(conf), mode=0o644) + util.write_file(filename, str(conf), mode=0o644) def _read_system_hostname(self): sys_hostname = self._read_hostname(self.hostname_conf_fn) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index be5b3d24..c9ee2747 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -78,18 +78,18 @@ class Distro(distros.Distro): } rhel_util.update_sysconfig_file(out_fn, locale_cfg) - def _write_hostname(self, hostname, out_fn): + def _write_hostname(self, hostname, filename): # systemd will never update previous-hostname for us, so # we need to do it ourselves - if self.uses_systemd() and out_fn.endswith('/previous-hostname'): - util.write_file(out_fn, hostname) + if self.uses_systemd() and filename.endswith('/previous-hostname'): + util.write_file(filename, hostname) elif self.uses_systemd(): subp.subp(['hostnamectl', 'set-hostname', str(hostname)]) else: host_cfg = { 'HOSTNAME': hostname, } - rhel_util.update_sysconfig_file(out_fn, host_cfg) + rhel_util.update_sysconfig_file(filename, host_cfg) def _read_system_hostname(self): if self.uses_systemd(): diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py index 2df9441a..186d8ad8 100644 --- a/cloudinit/patcher.py +++ b/cloudinit/patcher.py @@ -6,7 +6,6 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-import imp import logging import sys @@ -20,7 +19,7 @@ class QuietStreamHandler(logging.StreamHandler): pass -def _patch_logging(): +def patch_logging(): # Replace 'handleError' with one that will be more # tolerant of errors in that it can avoid # re-notifying on exceptions and when errors @@ -37,12 +36,4 @@ pass setattr(logging.Handler, 'handleError', handleError) - -def patch(): - imp.acquire_lock() - try: - _patch_logging() - finally: - imp.release_lock() - # vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index 94ab052d..021866b7 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -23,7 +23,7 @@ class MyBaseDistro(distros.Distro): def _write_network(self, settings): raise NotImplementedError() - def package_command(self, cmd, args=None, pkgs=None): + def package_command(self, command, args=None, pkgs=None): raise NotImplementedError() def update_package_sources(self): diff --git a/tox.ini b/tox.ini index f21e1186..27c16ef3 100644 --- a/tox.ini +++ b/tox.ini @@ -23,7 +23,7 @@ setenv = basepython = python3 deps = # requirements - pylint==2.6.0 + pylint==2.9.3 # test-requirements because unit tests are now present in cloudinit tree -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt -- cgit v1.2.3 From f0ab1e64852d50f4fe0de84e0bca0ee8bb516a9f Mon Sep 17 00:00:00 2001 From: PengpengSun <40026211+PengpengSun@users.noreply.github.com> Date: Wed, 21 Jul 2021 00:49:37 +0800 Subject: VMware: add network-config support in ovf-env.xml (#947) Details: 1. Support setting guest network config through guestinfo.ovfEnv using OVF 2. 'network-config' Property is optional 3.
'network-config' Property's value has to be base64 encoded Added unittests and updated ovf-env.xml example --- cloudinit/sources/DataSourceOVF.py | 14 ++++- doc/sources/ovf/example/ovf-env.xml | 8 +++ tests/unittests/test_datasource/test_ovf.py | 97 +++++++++++++++++++++++++++++ 3 files changed, 117 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 9e83dccc..e909f058 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -358,8 +358,11 @@ class DataSourceOVF(sources.DataSource): if contents: break if contents: - (md, ud, cfg) = read_ovf_environment(contents) + read_network = ('com.vmware.guestinfo' == name) + (md, ud, cfg) = read_ovf_environment(contents, read_network) self.environment = contents + if 'network-config' in md and md['network-config']: + self._network_config = md['network-config'] found.append(name) # There was no OVF transports found @@ -507,13 +510,14 @@ def read_vmware_imc(config): # This will return a dict with some content # meta-data, user-data, some config -def read_ovf_environment(contents): +def read_ovf_environment(contents, read_network=False): props = get_properties(contents) md = {} cfg = {} ud = None cfg_props = ['password'] md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id'] + network_props = ['network-config'] for (prop, val) in props.items(): if prop == 'hostname': prop = "local-hostname" @@ -521,6 +525,12 @@ def read_ovf_environment(contents): md[prop] = val elif prop in cfg_props: cfg[prop] = val + elif prop in network_props and read_network: + try: + network_config = base64.b64decode(val.encode()) + md[prop] = safeload_yaml_or_dict(network_config).get('network') + except Exception: + LOG.debug("Ignore network-config in wrong format") elif prop == "user-data": try: ud = base64.b64decode(val.encode()) diff --git a/doc/sources/ovf/example/ovf-env.xml b/doc/sources/ovf/example/ovf-env.xml index 13e8f104..4ef4ee63 100644 --- a/doc/sources/ovf/example/ovf-env.xml +++ b/doc/sources/ovf/example/ovf-env.xml @@ -41,6 +41,14 @@ --> + + diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index e2718077..9f52b504 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -83,6 +83,103 @@ class TestReadOvfEnv(CiTestCase): self.assertEqual({'password': "passw0rd"}, cfg) self.assertIsNone(ud) + def test_with_b64_network_config_enable_read_network(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - eng.vmware.com + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + network_config_b64 = base64.b64encode(network_config.encode()).decode() + props = {"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env, True) + self.assertEqual("inst-001", md["instance-id"]) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertEqual( + {'version': 2, 'ethernets': + {'nics': + {'nameservers': + {'addresses': ['127.0.0.53'], + 'search': ['eng.vmware.com', 'vmware.com']}, + 'match': {'name': 'eth*'}, + 'gateway4': '10.10.10.253', + 'dhcp4': False, + 'addresses': ['10.10.10.1/24']}}}, + md["network-config"]) + self.assertIsNone(ud) + + def 
test_with_non_b64_network_config_enable_read_network(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - eng.vmware.com + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + props = {"network-config": network_config, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env, True) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertIsNone(ud) + + def test_with_b64_network_config_disable_read_network(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - eng.vmware.com + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + network_config_b64 = base64.b64encode(network_config.encode()).decode() + props = {"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertIsNone(ud) + class TestMarkerFiles(CiTestCase): -- cgit v1.2.3 From 4257e30ac4b8730af35c078f2df0a2234dd19ffa Mon Sep 17 00:00:00 2001 From: eb3095 <45504889+eb3095@users.noreply.github.com> Date: Wed, 21 Jul 2021 16:48:05 -0400 Subject: Add VZLinux support (#951) Virtuozzo Linux is a distro based off of CentOS 8, similar to Alma Linux and Rocky Linux. --- README.md | 2 +- cloudinit/config/cc_ntp.py | 2 +- cloudinit/config/cc_yum_add_repo.py | 6 ++++-- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/virtuozzo.py | 9 +++++++++ cloudinit/net/sysconfig.py | 3 ++- cloudinit/tests/test_util.py | 32 ++++++++++++++++++++++++++++++++ cloudinit/util.py | 17 ++++++++++++++++- config/cloud.cfg.tmpl | 6 +++--- systemd/cloud-init-generator.tmpl | 2 +- systemd/cloud-init.service.tmpl | 2 +- tests/unittests/test_cli.py | 2 +- tools/render-cloudcfg | 2 +- 13 files changed, 73 insertions(+), 14 deletions(-) create mode 100644 cloudinit/distros/virtuozzo.py (limited to 'cloudinit') diff --git a/README.md b/README.md index 462e3204..832d8b43 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)















| +| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)















| ## To start developing cloud-init diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index acf3251d..d227efb9 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -25,7 +25,7 @@ frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse', - 'photon', 'rhel', 'rocky', 'sles', 'ubuntu'] + 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] NTP_CLIENT_CONFIG = { 'chrony': { diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 67f09686..6e6133d1 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -18,7 +18,8 @@ entry, the config entry will be skipped. **Module frequency:** per always -**Supported distros:** almalinux, centos, fedora, photon, rhel, rocky +**Supported distros:** almalinux, centos, fedora, photon, rhel, rocky, + virtuozzo **Config keys**:: @@ -36,7 +37,8 @@ from configparser import ConfigParser from cloudinit import util -distros = ['almalinux', 'centos', 'fedora', 'photon', 'rhel', 'rocky'] +distros = ['almalinux', 'centos', 'fedora', 'photon', 'rhel', 'rocky', + 'virtuozzo'] def _canonicalize_id(repo_id): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 40c4f2ac..7b813167 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -50,7 +50,7 @@ OSFAMILIES = { 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'photon', 'rhel', - 'rocky'], + 'rocky', 'virtuozzo'], 'suse': ['opensuse', 'sles'], } diff --git a/cloudinit/distros/virtuozzo.py b/cloudinit/distros/virtuozzo.py new file mode 100644 index 00000000..edb3165d --- /dev/null +++ b/cloudinit/distros/virtuozzo.py @@ -0,0 +1,9 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.distros import rhel + + +class Distro(rhel.Distro): + pass + +# vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 8031cd3a..49f52e9d 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -18,7 +18,8 @@ from .network_state import ( is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) LOG = logging.getLogger(__name__) -KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky', 'suse'] +KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky', 'suse', + 'virtuozzo'] NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index a1ccb1dc..bd7720d1 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -140,6 +140,20 @@ OS_RELEASE_ROCKY_8 = dedent("""\ ROCKY_SUPPORT_PRODUCT_VERSION="8" """) +OS_RELEASE_VIRTUOZZO_8 = dedent("""\ + NAME="Virtuozzo Linux" + VERSION="8" + ID="virtuozzo" + ID_LIKE="rhel fedora" + VERSION_ID="8" + PLATFORM_ID="platform:el8" + PRETTY_NAME="Virtuozzo Linux" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8" + HOME_URL="https://www.vzlinux.org" + BUG_REPORT_URL="https://bugs.openvz.org" +""") + REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)" REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)" REDHAT_RELEASE_REDHAT_6 = ( @@ -150,6 +164,8 @@ REDHAT_RELEASE_ALMALINUX_8 = ( "AlmaLinux release 8.3 (Purple Manul)") REDHAT_RELEASE_ROCKY_8 = ( "Rocky Linux release 8.3 (Green Obsidian)") +REDHAT_RELEASE_VIRTUOZZO_8 = ( + "Virtuozzo Linux release 8") OS_RELEASE_DEBIAN = dedent("""\ PRETTY_NAME="Debian GNU/Linux 9 (stretch)" @@ -581,6 +597,22 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists): + """Verify virtuozzo linux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): + """Verify virtuozzo linux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_debian(self, m_os_release, m_path_exists): """Verify we get the correct name and release name on Debian.""" diff --git a/cloudinit/util.py b/cloudinit/util.py index 7995c6c8..3bed1aed 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -453,9 +453,19 @@ def _parse_redhat_release(release_file=None): redhat_regex = ( r'(?P.+) release (?P[\d\.]+) ' r'\((?P[^)]+)\)') + + # Virtuozzo deviates here + if "Virtuozzo" in redhat_release: + redhat_regex = r'(?P.+) release (?P[\d\.]+)' + match = re.match(redhat_regex, redhat_release) if match: group = match.groupdict() + + # Virtuozzo has no codename in this file + if "Virtuozzo" in group['name']: + group['codename'] = group['name'] + group['name'] = group['name'].lower().partition(' linux')[0] if group['name'] == 'red hat enterprise': group['name'] = 'redhat' @@ -470,9 
+480,11 @@ def get_linux_distro(): distro_version = '' flavor = '' os_release = {} + os_release_rhel = False if os.path.exists('/etc/os-release'): os_release = load_shell_content(load_file('/etc/os-release')) if not os_release: + os_release_rhel = True os_release = _parse_redhat_release() if os_release: distro_name = os_release.get('ID', '') @@ -485,6 +497,9 @@ def get_linux_distro(): flavor = platform.machine() elif distro_name == 'photon': flavor = os_release.get('PRETTY_NAME', '') + elif distro_name == 'virtuozzo' and not os_release_rhel: + # Only use this if the redhat file is not parsed + flavor = os_release.get('PRETTY_NAME', '') else: flavor = os_release.get('VERSION_CODENAME', '') if not flavor: @@ -533,7 +548,7 @@ def system_info(): linux_dist = info['dist'][0].lower() if linux_dist in ( 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora', - 'photon', 'rhel', 'rocky', 'suse'): + 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index cb2a625b..f918d919 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -32,7 +32,7 @@ disable_root: true {% endif %} {% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora", - "rhel", "rocky"] %} + "rhel", "rocky", "virtuozzo"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} resize_rootfs: noblock @@ -174,7 +174,7 @@ system_info: # This will affect which distro class gets used {% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian", "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", - "rocky", "suse", "ubuntu"] %} + "rocky", "suse", "ubuntu", "virtuozzo"] %} distro: {{ variant }} {% elif variant in ["dragonfly"] %} distro: dragonflybsd @@ -228,7 +228,7 @@ system_info: security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh {% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "fedora", - "rhel", "rocky", "suse"] %} + "rhel", "rocky", "suse", "virtuozzo"] %} # Default user name + that default users groups (if added/used) default_user: {% if variant == "amazon" %} diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 0713db16..1d6af5ae 100644 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -83,7 +83,7 @@ default() { check_for_datasource() { local ds_rc="" -{% if variant in ["almalinux", "rhel", "fedora", "centos", "rocky"] %} +{% if variant in ["almalinux", "rhel", "fedora", "centos", "rocky", "virtuozzo"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} local dsidentify="/usr/lib/cloud-init/ds-identify" diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index c773e411..cab3ec51 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -12,7 +12,7 @@ After=systemd-networkd-wait-online.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=networking.service {% endif %} -{% if variant in ["almalinux", "centos", "fedora", "rhel", "rocky"] %} +{% if variant in ["almalinux", "centos", "fedora", "rhel", "rocky", "virtuozzo"] %} After=network.service After=NetworkManager.service {% endif %} diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index fdb4026c..90d8f7b9 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -225,7 +225,7 @@ class 
TestCLI(test_helpers.FilesystemMockingTestCase): expected_doc_sections = [ '**Supported distros:** all', ('**Supported distros:** almalinux, alpine, centos, debian, ' - 'fedora, opensuse, photon, rhel, rocky, sles, ubuntu'), + 'fedora, opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo'), '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' ] diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 7e667de4..227bd8ab 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -6,7 +6,7 @@ import sys VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian", "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", - "suse","rocky", "ubuntu", "unknown"] + "suse","rocky", "ubuntu", "unknown", "virtuozzo"] if "avoid-pep8-E402-import-not-top-of-file": -- cgit v1.2.3 From 6e7066ea2b06940c4931f0258c7982b09966582f Mon Sep 17 00:00:00 2001 From: sshedi <53473811+sshedi@users.noreply.github.com> Date: Fri, 23 Jul 2021 21:10:41 +0530 Subject: Add ability to manage fallback network config on PhotonOS (#941) Currently cloud-init generates fallback network config in various scenarios. For example: 1. When no DS is found 2. When no 'network' info is given in DS metadata 3. If a DS gives a network config once and, upon reboot, doesn't give any network info, previously set network data will be overridden. A newly introduced key in cloud.cfg.tmpl can be used to control this behavior on PhotonOS. Also, if the OS comes with a set of default network files (configs), as PhotonOS does, cloud-init should not overwrite them by default. This change also includes some minor cleanup, reorganizing a few config variables. Signed-off-by: Shreenidhi Shedi --- cloudinit/distros/photon.py | 15 +++++++++ config/cloud.cfg.tmpl | 10 ++++-- doc/rtd/topics/availability.rst | 1 + doc/rtd/topics/network-config.rst | 7 ++++ tests/unittests/test_distros/test_photon.py | 51 +++++++++++++++++++++++++++++ 5 files changed, 82 insertions(+), 2 deletions(-) create mode 100644 tests/unittests/test_distros/test_photon.py (limited to 'cloudinit') diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py index 3ef5dd40..61e270c0 100644 --- a/cloudinit/distros/photon.py +++ b/cloudinit/distros/photon.py @@ -5,6 +5,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. +from cloudinit import net from cloudinit import util from cloudinit import subp from cloudinit import distros @@ -54,6 +55,20 @@ class Distro(distros.Distro): util.logexc(LOG, 'Command %s failed', cmd) return False, None, None + def generate_fallback_config(self): + key = 'disable_fallback_netcfg' + disable_fallback_netcfg = self._cfg.get(key, True) + LOG.debug('%s value is: %s', key, disable_fallback_netcfg) + + if not disable_fallback_netcfg: + return net.generate_fallback_config() + + LOG.info( + 'Skipping generate_fallback_config. Rely on PhotonOS default ' + 'network config' + ) + return None + def apply_locale(self, locale, out_fn=None): # This has a dependancy on glibc-i18n, user need to manually install it # and enable the option in cloud.cfg diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index f918d919..2314d893 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -18,9 +18,10 @@ users: - default {% endif %} -# VMware guest customization. {% if variant in ["photon"] %} +# VMware guest customization.
disable_vmware_customization: true +manage_etc_hosts: false {% endif %} # If this is set, 'root' will not be able to ssh in and they @@ -306,10 +307,15 @@ system_info: paths: cloud_dir: /var/lib/cloud/ templates_dir: /etc/cloud/templates/ + network: + renderers: ['networkd'] ssh_svcname: sshd -#manage_etc_hosts: true + # If set to true, cloud-init will not use fallback network config. + # In Photon, we have default network settings, hence if network settings are + # not explicitly given in metadata, don't use fallback network config. + disable_fallback_netcfg: true {% endif %} {% if variant in ["freebsd", "netbsd", "openbsd"] %} network: diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index a45a49d6..b84b6076 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -26,6 +26,7 @@ OpenBSD and DragonFlyBSD: - Gentoo Linux - NetBSD - OpenBSD +- Photon OS - RHEL/CentOS - SLES/openSUSE - Ubuntu diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 5f7a74f8..8eb7a31b 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -104,6 +104,13 @@ interface given the information it has available. Finally after selecting the "right" interface, a configuration is generated and applied to the system. +.. note:: + + PhotonOS disables fallback networking configuration by default leaving + network unrendered when no other network config is provided. + If fallback config is still desired on PhotonOS, it can be enabled by + providing `disable_fallback_netcfg: false` in + `/etc/cloud/cloud.cfg:sys_config` settings. Network Configuration Sources ============================= diff --git a/tests/unittests/test_distros/test_photon.py b/tests/unittests/test_distros/test_photon.py new file mode 100644 index 00000000..775f37ac --- /dev/null +++ b/tests/unittests/test_distros/test_photon.py @@ -0,0 +1,51 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from . 
import _get_distro +from cloudinit import util +from cloudinit.tests.helpers import mock +from cloudinit.tests.helpers import CiTestCase + +SYSTEM_INFO = { + 'paths': { + 'cloud_dir': '/var/lib/cloud/', + 'templates_dir': '/etc/cloud/templates/', + }, + 'network': {'renderers': 'networkd'}, +} + + +class TestPhoton(CiTestCase): + with_logs = True + distro = _get_distro('photon', SYSTEM_INFO) + expected_log_line = 'Rely on PhotonOS default network config' + + def test_network_renderer(self): + self.assertEqual(self.distro._cfg['network']['renderers'], 'networkd') + + def test_get_distro(self): + self.assertEqual(self.distro.osfamily, 'photon') + + def test_write_hostname(self): + hostname = 'myhostname' + hostfile = self.tmp_path('hostfile') + self.distro._write_hostname(hostname, hostfile) + self.assertEqual(hostname + '\n', util.load_file(hostfile)) + + @mock.patch('cloudinit.net.generate_fallback_config') + def test_fallback_netcfg(self, m_fallback_cfg): + + key = 'disable_fallback_netcfg' + # Don't use fallback if no setting given + self.logs.truncate(0) + assert(self.distro.generate_fallback_config() is None) + self.assertIn(self.expected_log_line, self.logs.getvalue()) + + self.logs.truncate(0) + self.distro._cfg[key] = True + assert(self.distro.generate_fallback_config() is None) + self.assertIn(self.expected_log_line, self.logs.getvalue()) + + self.logs.truncate(0) + self.distro._cfg[key] = False + assert(self.distro.generate_fallback_config() is not None) + self.assertNotIn(self.expected_log_line, self.logs.getvalue()) -- cgit v1.2.3 From 758acf976f2cb67a85411467fa5fca2ea17a2283 Mon Sep 17 00:00:00 2001 From: Mal Graty Date: Tue, 3 Aug 2021 17:08:26 +0100 Subject: Implementing device_aliases as described in docs (#945) Implement missing device_aliases feature The device_aliases key has been documented as part of disk_setup for years, however the feature was never implemented. This implements the feature as documented allowing usercfg (rather than dsconfig) to create a mapping of device names. This is not to be confused with disk_aliases, a very similar map but existing solely for use by datasources. LP: #1867532 --- cloudinit/config/cc_disk_setup.py | 13 +- cloudinit/config/cc_mounts.py | 17 +- tests/integration_tests/bugs/test_lp1920939.py | 140 --------------- tests/integration_tests/modules/test_disk_setup.py | 192 +++++++++++++++++++++ .../unittests/test_handler/test_handler_mounts.py | 9 + tools/.github-cla-signers | 1 + 6 files changed, 225 insertions(+), 147 deletions(-) delete mode 100644 tests/integration_tests/bugs/test_lp1920939.py create mode 100644 tests/integration_tests/modules/test_disk_setup.py (limited to 'cloudinit') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 22af3813..3ec49ca5 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -125,9 +125,15 @@ def handle(_name, cfg, cloud, log, _args): See doc/examples/cloud-config-disk-setup.txt for documentation on the format. 
""" + device_aliases = cfg.get("device_aliases", {}) + + def alias_to_device(cand): + name = device_aliases.get(cand) + return cloud.device_name_to_device(name or cand) or name + disk_setup = cfg.get("disk_setup") if isinstance(disk_setup, dict): - update_disk_setup_devices(disk_setup, cloud.device_name_to_device) + update_disk_setup_devices(disk_setup, alias_to_device) log.debug("Partitioning disks: %s", str(disk_setup)) for disk, definition in disk_setup.items(): if not isinstance(definition, dict): @@ -145,7 +151,7 @@ def handle(_name, cfg, cloud, log, _args): fs_setup = cfg.get("fs_setup") if isinstance(fs_setup, list): log.debug("setting up filesystems: %s", str(fs_setup)) - update_fs_setup_devices(fs_setup, cloud.device_name_to_device) + update_fs_setup_devices(fs_setup, alias_to_device) for definition in fs_setup: if not isinstance(definition, dict): log.warning("Invalid file system definition: %s" % definition) @@ -174,7 +180,8 @@ def update_disk_setup_devices(disk_setup, tformer): del disk_setup[transformed] disk_setup[transformed] = disk_setup[origname] - disk_setup[transformed]['_origname'] = origname + if isinstance(disk_setup[transformed], dict): + disk_setup[transformed]['_origname'] = origname del disk_setup[origname] LOG.debug("updated disk_setup device entry '%s' to '%s'", origname, transformed) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index c22d1698..eeb008d2 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -123,7 +123,7 @@ def _is_block_device(device_path, partition_path=None): return os.path.exists(sys_path) -def sanitize_devname(startname, transformer, log): +def sanitize_devname(startname, transformer, log, aliases=None): log.debug("Attempting to determine the real name of %s", startname) # workaround, allow user to specify 'ephemeral' @@ -137,9 +137,14 @@ def sanitize_devname(startname, transformer, log): return startname device_path, partition_number = util.expand_dotted_devname(devname) + orig = device_path + + if aliases: + device_path = aliases.get(device_path, device_path) + if orig != device_path: + log.debug("Mapped device alias %s to %s", orig, device_path) if is_meta_device_name(device_path): - orig = device_path device_path = transformer(device_path) if not device_path: return None @@ -394,6 +399,8 @@ def handle(_name, cfg, cloud, log, _args): fstab_devs[toks[0]] = line fstab_lines.append(line) + device_aliases = cfg.get("device_aliases", {}) + for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): @@ -402,7 +409,8 @@ def handle(_name, cfg, cloud, log, _args): continue start = str(cfgmnt[i][0]) - sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + sanitized = sanitize_devname(start, cloud.device_name_to_device, log, + aliases=device_aliases) if sanitized != start: log.debug("changed %s => %s" % (start, sanitized)) @@ -444,7 +452,8 @@ def handle(_name, cfg, cloud, log, _args): # entry has the same device name for defmnt in defmnts: start = defmnt[0] - sanitized = sanitize_devname(start, cloud.device_name_to_device, log) + sanitized = sanitize_devname(start, cloud.device_name_to_device, log, + aliases=device_aliases) if sanitized != start: log.debug("changed default device %s => %s" % (start, sanitized)) diff --git a/tests/integration_tests/bugs/test_lp1920939.py b/tests/integration_tests/bugs/test_lp1920939.py deleted file mode 100644 index 408792a6..00000000 --- a/tests/integration_tests/bugs/test_lp1920939.py +++ /dev/null @@ 
-1,140 +0,0 @@ -""" -Test that disk setup can run successfully on a mounted partition when -partprobe is being used. - -lp-1920939 -""" -import json -import os -import pytest -from uuid import uuid4 -from pycloudlib.lxd.instance import LXDInstance - -from cloudinit.subp import subp -from tests.integration_tests.instances import IntegrationInstance - -DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4()) - - -def setup_and_mount_lxd_disk(instance: LXDInstance): - subp('lxc config device add {} test-disk-setup-disk disk source={}'.format( - instance.name, DISK_PATH).split()) - - -@pytest.yield_fixture -def create_disk(): - # 640k should be enough for anybody - subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split()) - yield - os.remove(DISK_PATH) - - -USERDATA = """\ -#cloud-config -disk_setup: - /dev/sdb: - table_type: mbr - layout: [50, 50] - overwrite: True -fs_setup: - - label: test - device: /dev/sdb1 - filesystem: ext4 - - label: test2 - device: /dev/sdb2 - filesystem: ext4 -mounts: -- ["/dev/sdb1", "/mnt1"] -- ["/dev/sdb2", "/mnt2"] -""" - -UPDATED_USERDATA = """\ -#cloud-config -disk_setup: - /dev/sdb: - table_type: mbr - layout: [100] - overwrite: True -fs_setup: - - label: test3 - device: /dev/sdb1 - filesystem: ext4 -mounts: -- ["/dev/sdb1", "/mnt3"] -""" - - -def _verify_first_disk_setup(client, log): - assert 'Traceback' not in log - assert 'WARN' not in log - lsblk = json.loads(client.execute('lsblk --json')) - sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] - assert len(sdb['children']) == 2 - assert sdb['children'][0]['name'] == 'sdb1' - assert sdb['children'][0]['mountpoint'] == '/mnt1' - assert sdb['children'][1]['name'] == 'sdb2' - assert sdb['children'][1]['mountpoint'] == '/mnt2' - - -@pytest.mark.user_data(USERDATA) -@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) -@pytest.mark.ubuntu -@pytest.mark.lxd_vm -# Not bionic or xenial because the LXD agent gets in the way of us -# changing the userdata -@pytest.mark.not_bionic -@pytest.mark.not_xenial -def test_disk_setup_when_mounted(create_disk, client: IntegrationInstance): - """Test lp-1920939. - - We insert an extra disk into our VM, format it to have two partitions, - modify our cloud config to mount devices before disk setup, and modify - our userdata to setup a single partition on the disk. - - This allows cloud-init to attempt disk setup on a mounted partition. - When blockdev is in use, it will fail with - "blockdev: ioctl error on BLKRRPART: Device or resource busy" along - with a warning and a traceback. When partprobe is in use, everything - should work successfully. 
- """ - log = client.read_from_file('/var/log/cloud-init.log') - _verify_first_disk_setup(client, log) - - # Update our userdata and cloud.cfg to mount then perform new disk setup - client.write_to_file( - '/var/lib/cloud/seed/nocloud-net/user-data', - UPDATED_USERDATA - ) - client.execute("sed -i 's/write-files/write-files\\n - mounts/' " - "/etc/cloud/cloud.cfg") - - client.execute('cloud-init clean --logs') - client.restart() - - # Assert new setup works as expected - assert 'Traceback' not in log - assert 'WARN' not in log - - lsblk = json.loads(client.execute('lsblk --json')) - sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] - assert len(sdb['children']) == 1 - assert sdb['children'][0]['name'] == 'sdb1' - assert sdb['children'][0]['mountpoint'] == '/mnt3' - - -@pytest.mark.user_data(USERDATA) -@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) -@pytest.mark.ubuntu -@pytest.mark.lxd_vm -def test_disk_setup_no_partprobe(create_disk, client: IntegrationInstance): - """Ensure disk setup still works as expected without partprobe.""" - # We can't do this part in a bootcmd because the path has already - # been found by the time we get to the bootcmd - client.execute('rm $(which partprobe)') - client.execute('cloud-init clean --logs') - client.restart() - - log = client.read_from_file('/var/log/cloud-init.log') - _verify_first_disk_setup(client, log) - - assert 'partprobe' not in log diff --git a/tests/integration_tests/modules/test_disk_setup.py b/tests/integration_tests/modules/test_disk_setup.py new file mode 100644 index 00000000..1fc96c52 --- /dev/null +++ b/tests/integration_tests/modules/test_disk_setup.py @@ -0,0 +1,192 @@ +import json +import os +import pytest +from uuid import uuid4 +from pycloudlib.lxd.instance import LXDInstance + +from cloudinit.subp import subp +from tests.integration_tests.instances import IntegrationInstance + +DISK_PATH = '/tmp/test_disk_setup_{}'.format(uuid4()) + + +def setup_and_mount_lxd_disk(instance: LXDInstance): + subp('lxc config device add {} test-disk-setup-disk disk source={}'.format( + instance.name, DISK_PATH).split()) + + +@pytest.yield_fixture +def create_disk(): + # 640k should be enough for anybody + subp('dd if=/dev/zero of={} bs=1k count=640'.format(DISK_PATH).split()) + yield + os.remove(DISK_PATH) + + +ALIAS_USERDATA = """\ +#cloud-config +device_aliases: + my_alias: /dev/sdb +disk_setup: + my_alias: + table_type: mbr + layout: [50, 50] + overwrite: True +fs_setup: +- label: fs1 + device: my_alias.1 + filesystem: ext4 +- label: fs2 + device: my_alias.2 + filesystem: ext4 +mounts: +- ["my_alias.1", "/mnt1"] +- ["my_alias.2", "/mnt2"] +""" + + +@pytest.mark.user_data(ALIAS_USERDATA) +@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) +@pytest.mark.ubuntu +@pytest.mark.lxd_vm +class TestDeviceAliases: + """Test devices aliases work on disk setup/mount""" + + def test_device_alias(self, create_disk, client: IntegrationInstance): + log = client.read_from_file('/var/log/cloud-init.log') + assert ( + "updated disk_setup device entry 'my_alias' to '/dev/sdb'" + ) in log + assert 'changed my_alias.1 => /dev/sdb1' in log + assert 'changed my_alias.2 => /dev/sdb2' in log + assert 'WARN' not in log + assert 'Traceback' not in log + + lsblk = json.loads(client.execute('lsblk --json')) + sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] + assert len(sdb['children']) == 2 + assert sdb['children'][0]['name'] == 'sdb1' + assert sdb['children'][0]['mountpoint'] == '/mnt1' + assert 
sdb['children'][1]['name'] == 'sdb2' + assert sdb['children'][1]['mountpoint'] == '/mnt2' + + +PARTPROBE_USERDATA = """\ +#cloud-config +disk_setup: + /dev/sdb: + table_type: mbr + layout: [50, 50] + overwrite: True +fs_setup: + - label: test + device: /dev/sdb1 + filesystem: ext4 + - label: test2 + device: /dev/sdb2 + filesystem: ext4 +mounts: +- ["/dev/sdb1", "/mnt1"] +- ["/dev/sdb2", "/mnt2"] +""" + +UPDATED_PARTPROBE_USERDATA = """\ +#cloud-config +disk_setup: + /dev/sdb: + table_type: mbr + layout: [100] + overwrite: True +fs_setup: + - label: test3 + device: /dev/sdb1 + filesystem: ext4 +mounts: +- ["/dev/sdb1", "/mnt3"] +""" + + +@pytest.mark.user_data(PARTPROBE_USERDATA) +@pytest.mark.lxd_setup.with_args(setup_and_mount_lxd_disk) +@pytest.mark.ubuntu +@pytest.mark.lxd_vm +class TestPartProbeAvailability: + """Test disk setup works with partprobe + + Disk setup can run successfully on a mounted partition when + partprobe is being used. + + lp-1920939 + """ + + def _verify_first_disk_setup(self, client, log): + assert 'Traceback' not in log + assert 'WARN' not in log + lsblk = json.loads(client.execute('lsblk --json')) + sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] + assert len(sdb['children']) == 2 + assert sdb['children'][0]['name'] == 'sdb1' + assert sdb['children'][0]['mountpoint'] == '/mnt1' + assert sdb['children'][1]['name'] == 'sdb2' + assert sdb['children'][1]['mountpoint'] == '/mnt2' + + # Not bionic or xenial because the LXD agent gets in the way of us + # changing the userdata + @pytest.mark.not_bionic + @pytest.mark.not_xenial + def test_disk_setup_when_mounted( + self, create_disk, client: IntegrationInstance + ): + """Test lp-1920939. + + We insert an extra disk into our VM, format it to have two partitions, + modify our cloud config to mount devices before disk setup, and modify + our userdata to setup a single partition on the disk. + + This allows cloud-init to attempt disk setup on a mounted partition. + When blockdev is in use, it will fail with + "blockdev: ioctl error on BLKRRPART: Device or resource busy" along + with a warning and a traceback. When partprobe is in use, everything + should work successfully. 
+ """ + log = client.read_from_file('/var/log/cloud-init.log') + self._verify_first_disk_setup(client, log) + + # Update our userdata and cloud.cfg to mount then perform new disk + # setup + client.write_to_file( + '/var/lib/cloud/seed/nocloud-net/user-data', + UPDATED_PARTPROBE_USERDATA, + ) + client.execute( + "sed -i 's/write-files/write-files\\n - mounts/' " + "/etc/cloud/cloud.cfg" + ) + + client.execute('cloud-init clean --logs') + client.restart() + + # Assert new setup works as expected + assert 'Traceback' not in log + assert 'WARN' not in log + + lsblk = json.loads(client.execute('lsblk --json')) + sdb = [x for x in lsblk['blockdevices'] if x['name'] == 'sdb'][0] + assert len(sdb['children']) == 1 + assert sdb['children'][0]['name'] == 'sdb1' + assert sdb['children'][0]['mountpoint'] == '/mnt3' + + def test_disk_setup_no_partprobe( + self, create_disk, client: IntegrationInstance + ): + """Ensure disk setup still works as expected without partprobe.""" + # We can't do this part in a bootcmd because the path has already + # been found by the time we get to the bootcmd + client.execute('rm $(which partprobe)') + client.execute('cloud-init clean --logs') + client.restart() + + log = client.read_from_file('/var/log/cloud-init.log') + self._verify_first_disk_setup(client, log) + + assert 'partprobe' not in log diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py index e87069f6..69e8b30d 100644 --- a/tests/unittests/test_handler/test_handler_mounts.py +++ b/tests/unittests/test_handler/test_handler_mounts.py @@ -133,6 +133,15 @@ class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): disk_path, cc_mounts.sanitize_devname(disk_path, None, mock.Mock())) + def test_device_aliases_remapping(self): + disk_path = '/dev/sda' + self.mock_existence_of_disk(disk_path) + self.assertEqual(disk_path, + cc_mounts.sanitize_devname('mydata', + lambda x: None, + mock.Mock(), + {'mydata': disk_path})) + class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase): diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index e2979ed4..3c2c6d14 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -32,6 +32,7 @@ klausenbusk landon912 lucasmoura lungj +mal mamercad manuelisimo marlluslustosa -- cgit v1.2.3 From 3d9c862b6ded798031fad827328fa437bc14ac97 Mon Sep 17 00:00:00 2001 From: Aleksander Baranowski Date: Thu, 5 Aug 2021 18:32:36 +0200 Subject: Add support for EuroLinux 7 && EuroLinux 8 (#957) --- README.md | 2 +- cloudinit/config/cc_ntp.py | 9 +++-- cloudinit/config/cc_yum_add_repo.py | 8 ++-- cloudinit/distros/__init__.py | 4 +- cloudinit/distros/eurolinux.py | 9 +++++ cloudinit/net/sysconfig.py | 4 +- cloudinit/tests/.test_util.py.swp | Bin 16384 -> 0 bytes cloudinit/tests/test_util.py | 66 ++++++++++++++++++++++++++++++++ cloudinit/util.py | 4 +- config/cloud.cfg.tmpl | 12 +++--- doc/rtd/topics/availability.rst | 2 +- packages/pkg-deps.json | 14 +++++++ systemd/cloud-init-generator.tmpl | 3 +- systemd/cloud-init.service.tmpl | 3 +- tests/unittests/test_cli.py | 3 +- tests/unittests/test_net.py | 1 + tests/unittests/test_render_cloudcfg.py | 6 +-- tools/read-dependencies | 8 +++- tools/render-cloudcfg | 4 +- 19 files changed, 130 insertions(+), 32 deletions(-) create mode 100644 cloudinit/distros/eurolinux.py delete mode 100644 cloudinit/tests/.test_util.py.swp (limited to 'cloudinit') diff --git a/README.md b/README.md index 832d8b43..caf9a6e9 100644 --- a/README.md +++ b/README.md @@ 
-39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)















| +| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)















| ## To start developing cloud-init diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index d227efb9..7c371a49 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -24,8 +24,9 @@ LOG = logging.getLogger(__name__) frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 -distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse', - 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] +distros = ['almalinux', 'alpine', 'centos', 'debian', 'eurolinux', 'fedora', + 'opensuse', 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', + 'virtuozzo'] NTP_CLIENT_CONFIG = { 'chrony': { @@ -405,9 +406,9 @@ def generate_server_names(distro): # For legal reasons x.pool.sles.ntp.org does not exist, # use the opensuse pool pool_distro = 'opensuse' - elif distro == 'alpine': + elif distro == 'alpine' or distro == 'eurolinux': # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist - # so use general x.pool.ntp.org instead. + # so use general x.pool.ntp.org instead. The same applies to EuroLinux pool_distro = '' for x in range(0, NR_POOL_SERVERS): diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 6e6133d1..b7a48dcc 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -18,8 +18,8 @@ entry, the config entry will be skipped. **Module frequency:** per always -**Supported distros:** almalinux, centos, fedora, photon, rhel, rocky, - virtuozzo +**Supported distros:** almalinux, centos, eurolinux, fedora, photon, rhel, + rocky, virtuozzo **Config keys**:: @@ -37,8 +37,8 @@ from configparser import ConfigParser from cloudinit import util -distros = ['almalinux', 'centos', 'fedora', 'photon', 'rhel', 'rocky', - 'virtuozzo'] +distros = ['almalinux', 'centos', 'eurolinux', 'fedora', 'photon', 'rhel', + 'rocky', 'virtuozzo'] def _canonicalize_id(repo_id): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 7b813167..a634623a 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -49,8 +49,8 @@ OSFAMILIES = { 'debian': ['debian', 'ubuntu'], 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], - 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'photon', 'rhel', - 'rocky', 'virtuozzo'], + 'redhat': ['almalinux', 'amazon', 'centos', 'eurolinux', 'fedora', + 'photon', 'rhel', 'rocky', 'virtuozzo'], 'suse': ['opensuse', 'sles'], } diff --git a/cloudinit/distros/eurolinux.py b/cloudinit/distros/eurolinux.py new file mode 100644 index 00000000..edb3165d --- /dev/null +++ b/cloudinit/distros/eurolinux.py @@ -0,0 +1,9 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.distros import rhel + + +class Distro(rhel.Distro): + pass + +# vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 49f52e9d..06f7255e 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -18,8 +18,8 @@ from .network_state import ( is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) LOG = logging.getLogger(__name__) -KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky', 'suse', - 'virtuozzo'] +KNOWN_DISTROS = ['almalinux', 'centos', 'eurolinux', 'fedora', 'rhel', 'rocky', + 'suse', 'virtuozzo'] NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" diff --git a/cloudinit/tests/.test_util.py.swp b/cloudinit/tests/.test_util.py.swp deleted file mode 100644 index 78ef5865..00000000 Binary files a/cloudinit/tests/.test_util.py.swp and /dev/null differ diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index bd7720d1..9dd01158 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -124,6 +124,38 @@ OS_RELEASE_ALMALINUX_8 = dedent("""\ ALMALINUX_MANTISBT_PROJECT_VERSION="8.3" """) +OS_RELEASE_EUROLINUX_7 = dedent("""\ + VERSION="7.9 (Minsk)" + ID="eurolinux" + ID_LIKE="rhel scientific centos fedora" + VERSION_ID="7.9" + PRETTY_NAME="EuroLinux 7.9 (Minsk)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA" + HOME_URL="http://www.euro-linux.com/" + BUG_REPORT_URL="mailto:support@euro-linux.com" + REDHAT_BUGZILLA_PRODUCT="EuroLinux 7" + REDHAT_BUGZILLA_PRODUCT_VERSION=7.9 + REDHAT_SUPPORT_PRODUCT="EuroLinux" + REDHAT_SUPPORT_PRODUCT_VERSION="7.9" +""") + +OS_RELEASE_EUROLINUX_8 = dedent("""\ + NAME="EuroLinux" + VERSION="8.4 (Vaduz)" + ID="eurolinux" + ID_LIKE="rhel fedora centos" + VERSION_ID="8.4" + PLATFORM_ID="platform:el8" + PRETTY_NAME="EuroLinux 8.4 (Vaduz)" + ANSI_COLOR="0;34" + CPE_NAME="cpe:/o:eurolinux:eurolinux:8" + HOME_URL="https://www.euro-linux.com/" + BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/" + REDHAT_SUPPORT_PRODUCT="EuroLinux" + REDHAT_SUPPORT_PRODUCT_VERSION="8" +""") + OS_RELEASE_ROCKY_8 = dedent("""\ NAME="Rocky Linux" VERSION="8.3 (Green Obsidian)" @@ -162,6 +194,8 @@ REDHAT_RELEASE_REDHAT_7 = ( "Red Hat Enterprise Linux Server release 7.5 (Maipo)") REDHAT_RELEASE_ALMALINUX_8 = ( "AlmaLinux release 8.3 (Purple Manul)") +REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)" +REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)" REDHAT_RELEASE_ROCKY_8 = ( "Rocky Linux release 8.3 (Green Obsidian)") REDHAT_RELEASE_VIRTUOZZO_8 = ( @@ -581,6 +615,38 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 7 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 7 read from os-release.""" + m_os_release.return_value = OS_RELEASE_EUROLINUX_7 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) + + 
@mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_EUROLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): """Verify rocky linux 8 read from redhat-release.""" diff --git a/cloudinit/util.py b/cloudinit/util.py index 3bed1aed..d3ced463 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -547,8 +547,8 @@ def system_info(): if system == "linux": linux_dist = info['dist'][0].lower() if linux_dist in ( - 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora', - 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'): + 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'eurolinux', + 'fedora', 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 2314d893..825deff4 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -32,8 +32,8 @@ disable_root: false disable_root: true {% endif %} -{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora", - "rhel", "rocky", "virtuozzo"] %} +{% if variant in ["almalinux", "alpine", "amazon", "centos", "eurolinux", + "fedora", "rhel", "rocky", "virtuozzo"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} resize_rootfs: noblock @@ -174,8 +174,8 @@ cloud_final_modules: system_info: # This will affect which distro class gets used {% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian", - "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", - "rocky", "suse", "ubuntu", "virtuozzo"] %} + "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", + "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %} distro: {{ variant }} {% elif variant in ["dragonfly"] %} distro: dragonflybsd @@ -228,8 +228,8 @@ system_info: primary: http://ports.ubuntu.com/ubuntu-ports security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh -{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "fedora", - "rhel", "rocky", "suse", "virtuozzo"] %} +{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "eurolinux", + "fedora", "rhel", "rocky", "suse", "virtuozzo"] %} # Default user name + that default users groups (if added/used) default_user: {% if variant == "amazon" %} diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index b84b6076..e0644534 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -27,7 +27,7 @@ OpenBSD and DragonFlyBSD: - NetBSD - OpenBSD - Photon OS -- RHEL/CentOS +- RHEL/CentOS/AlmaLinux/Rocky Linux/EuroLinux - SLES/openSUSE - Ubuntu diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json index 80028396..eaf13469 100644 --- a/packages/pkg-deps.json +++ 
b/packages/pkg-deps.json @@ -27,6 +27,20 @@ "sudo" ] }, + "eurolinux" : { + "build-requires" : [ + "python3-devel" + ], + "requires" : [ + "e2fsprogs", + "iproute", + "net-tools", + "procps", + "rsyslog", + "shadow-utils", + "sudo" + ] + }, "redhat" : { "build-requires" : [ "python3-devel" diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 1d6af5ae..3dbe5947 100644 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -83,7 +83,8 @@ default() { check_for_datasource() { local ds_rc="" -{% if variant in ["almalinux", "rhel", "fedora", "centos", "rocky", "virtuozzo"] %} +{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel", + "rocky", "virtuozzo"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} local dsidentify="/usr/lib/cloud-init/ds-identify" diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index cab3ec51..636f59be 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -12,7 +12,8 @@ After=systemd-networkd-wait-online.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=networking.service {% endif %} -{% if variant in ["almalinux", "centos", "fedora", "rhel", "rocky", "virtuozzo"] %} +{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel", + "rocky", "virtuozzo"] %} After=network.service After=NetworkManager.service {% endif %} diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 90d8f7b9..a39e1d0c 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -225,7 +225,8 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): expected_doc_sections = [ '**Supported distros:** all', ('**Supported distros:** almalinux, alpine, centos, debian, ' - 'fedora, opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo'), + 'eurolinux, fedora, opensuse, photon, rhel, rocky, sles, ubuntu, ' + 'virtuozzo'), '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' ] diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 43e209c1..fc77b11e 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -5308,6 +5308,7 @@ class TestNetRenderers(CiTestCase): ('opensuse-tumbleweed', '', ''), ('sles', '', ''), ('centos', '', ''), + ('eurolinux', '', ''), ('fedora', '', ''), ('redhat', '', ''), ] diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py index 275879af..00d50e66 100644 --- a/tests/unittests/test_render_cloudcfg.py +++ b/tests/unittests/test_render_cloudcfg.py @@ -9,9 +9,9 @@ from cloudinit import subp from cloudinit import util # TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES) -DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", - "netbsd", "openbsd", "photon", "rhel", "suse", "ubuntu", - "unknown"] +DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "eurolinux", "fedora", + "freebsd", "netbsd", "openbsd", "photon", "rhel", "suse", + "ubuntu", "unknown"] @pytest.mark.allow_subp_for(sys.executable) diff --git a/tools/read-dependencies b/tools/read-dependencies index e52720d4..810154e4 100755 --- a/tools/read-dependencies +++ b/tools/read-dependencies @@ -23,6 +23,7 @@ DEFAULT_REQUIREMENTS = 'requirements.txt' # Map the appropriate package dir needed for each distro choice DISTRO_PKG_TYPE_MAP = { 'centos': 'redhat', + 'eurolinux': 'redhat', 'rocky': 'redhat', 'redhat': 
'redhat', 'debian': 'debian', @@ -68,11 +69,13 @@ ZYPPER_INSTALL = [ DRY_DISTRO_INSTALL_PKG_CMD = { 'rocky': ['yum', 'install', '--assumeyes'], 'centos': ['yum', 'install', '--assumeyes'], + 'eurolinux': ['yum', 'install', '--assumeyes'], 'redhat': ['yum', 'install', '--assumeyes'], } DISTRO_INSTALL_PKG_CMD = { 'rocky': MAYBE_RELIABLE_YUM_INSTALL, + 'eurolinux': MAYBE_RELIABLE_YUM_INSTALL, 'centos': MAYBE_RELIABLE_YUM_INSTALL, 'redhat': MAYBE_RELIABLE_YUM_INSTALL, 'debian': ['apt', 'install', '-y'], @@ -85,6 +88,7 @@ DISTRO_INSTALL_PKG_CMD = { # List of base system packages required to enable ci automation CI_SYSTEM_BASE_PKGS = { 'common': ['make', 'sudo', 'tar'], + 'eurolinux': ['python3-tox'], 'redhat': ['python3-tox'], 'centos': ['python3-tox'], 'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'], @@ -277,10 +281,10 @@ def pkg_install(pkg_list, distro, test_distro=False, dry_run=False): cmd = DRY_DISTRO_INSTALL_PKG_CMD[distro] install_cmd.extend(cmd) - if distro in ['centos', 'redhat', 'rocky']: + if distro in ['centos', 'redhat', 'rocky', 'eurolinux']: # CentOS and Redhat need epel-release to access oauthlib and jsonschema subprocess.check_call(install_cmd + ['epel-release']) - if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos']: + if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos', 'eurolinux']: pkg_list.append('rpm-build') subprocess.check_call(install_cmd + pkg_list) diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 227bd8ab..30f82521 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -5,8 +5,8 @@ import os import sys VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian", - "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", - "suse","rocky", "ubuntu", "unknown", "virtuozzo"] + "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "photon", + "rhel", "suse","rocky", "ubuntu", "unknown", "virtuozzo"] if "avoid-pep8-E402-import-not-top-of-file": -- cgit v1.2.3 From 8f42eb547ddf3202268e1e37a300ba8b2e89cbd2 Mon Sep 17 00:00:00 2001 From: Anh Vo Date: Thu, 5 Aug 2021 15:41:10 -0400 Subject: generate contents for ovf-env.xml when provisioning via IMDS (#959) Azure Linux Agent (WaLinuxAgent) waits for the ovf-env.xml file to be written by cloud-init when cloud-init provisions the VM. This file is written whenever cloud-init reads its contents from the provisioning ISO. With this change, when there is no provisioning ISO, DataSourceAzure will generate the ovf-env.xml file based on the metadata obtained from Azure IMDS. 
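To make the mechanism above concrete, here is a minimal standalone Python
sketch of the idea: render an ovf-env.xml-style document from IMDS-derived
values and verify it is well-formed before handing it over. The template
shape, the helper name build_ovf_sketch and its parameter names are
illustrative assumptions for this note only; the actual implementation is
build_minimal_ovf in cloudinit/sources/helpers/azure.py, added by the patch
below.

    import textwrap
    import xml.etree.ElementTree as ET

    # Illustrative template: three placeholders filled from IMDS metadata.
    OVF_SKETCH = textwrap.dedent('''\
        <ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
         xmlns:ns1="http://schemas.microsoft.com/windowsazure">
          <ns1:ProvisioningSection>
            <ns1:Version>1.0</ns1:Version>
            <ns1:LinuxProvisioningConfigurationSet>
              <ns1:UserName>{username}</ns1:UserName>
              <ns1:DisableSshPasswordAuthentication>{disable_ssh_pwd}
              </ns1:DisableSshPasswordAuthentication>
              <ns1:HostName>{hostname}</ns1:HostName>
            </ns1:LinuxProvisioningConfigurationSet>
          </ns1:ProvisioningSection>
        </ns0:Environment>
        ''')

    def build_ovf_sketch(username, hostname, disable_ssh_pwd):
        contents = OVF_SKETCH.format(username=username, hostname=hostname,
                                     disable_ssh_pwd=disable_ssh_pwd)
        # Parsing the rendered document back is a cheap well-formedness
        # check; it fails loudly on, e.g., a value with an unescaped '<'.
        ET.fromstring(contents)
        return contents.encode('utf-8')

    if __name__ == '__main__':
        print(build_ovf_sketch('azureuser', 'my-vm', 'true').decode())

Failing at render time like this is preferable to the guest agent
discovering a malformed ovf-env.xml later in provisioning.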
---
 cloudinit/sources/DataSourceAzure.py | 15 ++++++++++++++-
 cloudinit/sources/helpers/azure.py   | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 2e7bfbe3..01e2c959 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -45,7 +45,8 @@ from cloudinit.sources.helpers.azure import (
     is_byte_swapped,
     dhcp_log_cb,
     push_log_to_kvp,
-    report_failure_to_fabric)
+    report_failure_to_fabric,
+    build_minimal_ovf)
 
 LOG = logging.getLogger(__name__)
 
@@ -540,6 +541,18 @@ class DataSourceAzure(sources.DataSource):
             )
             crawled_data['metadata']['disable_password'] = imds_disable_password  # noqa: E501
 
+        if metadata_source == 'IMDS' and not crawled_data['files']:
+            try:
+                contents = build_minimal_ovf(
+                    username=imds_username,
+                    hostname=imds_hostname,
+                    disableSshPwd=imds_disable_password)
+                crawled_data['files'] = {'ovf-env.xml': contents}
+            except Exception as e:
+                report_diagnostic_event(
+                    "Failed to construct OVF from IMDS data %s" % e,
+                    logger_func=LOG.debug)
+
         # only use userdata from imds if OVF did not provide custom data
         # userdata provided by IMDS is always base64 encoded
         if not userdata_raw:
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index ad476076..a5ac1d57 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -344,6 +344,40 @@ def http_with_retries(url, **kwargs) -> str:
     raise exc
 
 
+def build_minimal_ovf(
+        username: str,
+        hostname: str,
+        disableSshPwd: str) -> bytes:
+    OVF_ENV_TEMPLATE = textwrap.dedent('''\
+        <ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
+         xmlns:ns1="http://schemas.microsoft.com/windowsazure"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+          <ns1:ProvisioningSection>
+            <ns1:Version>1.0</ns1:Version>
+            <ns1:LinuxProvisioningConfigurationSet>
+              <ns1:ConfigurationSetType>LinuxProvisioningConfiguration
+              </ns1:ConfigurationSetType>
+              <ns1:UserName>{username}</ns1:UserName>
+              <ns1:DisableSshPasswordAuthentication>{disableSshPwd}
+              </ns1:DisableSshPasswordAuthentication>
+              <ns1:HostName>{hostname}</ns1:HostName>
+            </ns1:LinuxProvisioningConfigurationSet>
+          </ns1:ProvisioningSection>
+          <ns1:PlatformSettingsSection>
+            <ns1:Version>1.0</ns1:Version>
+            <ns1:PlatformSettings>
+              <ns1:ProvisionGuestAgent>true</ns1:ProvisionGuestAgent>
+            </ns1:PlatformSettings>
+          </ns1:PlatformSettingsSection>
+        </ns0:Environment>
+        ''')
+    ret = OVF_ENV_TEMPLATE.format(
+        username=username,
+        hostname=hostname,
+        disableSshPwd=disableSshPwd)
+    return ret.encode('utf-8')
+
+
 class AzureEndpointHttpClient:
 
     headers = {
--
cgit v1.2.3


From 00dbaf1e9ab0e59d81662f0f3561897bef499a3f Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito
Date: Mon, 9 Aug 2021 16:49:56 +0200
Subject: Stop copying ssh system keys and check folder permissions (#956)

In /etc/ssh/sshd_config, it is possible to define a custom
authorized_keys file that will contain the keys allowed to access the
machine via the AuthorizedKeysFile option. cloud-init is able to add
user-specific keys to the existing ones, but we need to be careful
about which of the listed authorized_keys files we pick.

Choosing a file that is shared by all users would cause a security
issue, because the owner of that file could then access the other
users' accounts as well. We therefore pick an authorized_keys file
only if it satisfies the following conditions:
1. it is not a "global" file, i.e. it must be defined in
   AuthorizedKeysFile with %u, %h or be in /home/<user>. This avoids
   the security issue above.
2. it must comply with ssh permission requirements, otherwise the ssh
   agent won't use that file.

If it doesn't meet either of those conditions, write to
~/.ssh/authorized_keys instead.

We also need to consider the case when the chosen authorized_keys file
does not exist. In this case, the existing behavior of cloud-init is to
create the new file, and we need to be sure that the created file
complies with ssh permissions too, by setting:
- the actual file to permission 600, owned by the user
- the directories in the path that do not already exist to root-owned
  with permission 755
A simplified sketch of this path check is shown below.
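As a rough illustration only (the authoritative logic is check_permissions
and check_create_path in the patch below), the following self-contained
Python sketch models the rule: walk every component of a candidate
authorized_keys path and reject it, StrictModes-style, when a component is
writable by group or world, or owned by anyone other than root or the
target user. The function name path_looks_safe is an assumption made up for
this sketch.

    import getpass
    import os
    import pwd
    import stat

    def path_looks_safe(username, path):
        """Sketch of an sshd StrictModes-style walk over a key file path."""
        uid = pwd.getpwnam(username).pw_uid
        current = ''
        for part in os.path.abspath(path).lstrip('/').split('/'):
            current += '/' + part
            st = os.stat(current)
            # Every component must be owned by root or by the user itself.
            if st.st_uid not in (0, uid):
                return False
            # No component may grant write permission to group or world.
            if stat.S_IMODE(st.st_mode) & 0o022:
                return False
        return True

    if __name__ == '__main__':
        key_file = os.path.expanduser('~/.ssh/authorized_keys')
        if os.path.exists(key_file):
            print(key_file, 'safe:',
                  path_looks_safe(getpass.getuser(), key_file))

The sketch deliberately leaves out what the real patch must also handle:
paths that do not exist yet (created root-owned 755 for directories and
user-owned 600 for the file), group-membership lookups for accessibility
checks, and SELinux contexts.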
--- cloudinit/ssh_util.py | 133 +++++- cloudinit/util.py | 51 ++- tests/unittests/test_sshutil.py | 952 +++++++++++++++++++++++++++++++--------- 3 files changed, 920 insertions(+), 216 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 89057262..b8a3c8f7 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -249,6 +249,113 @@ def render_authorizedkeysfile_paths(value, homedir, username): return rendered +# Inspired from safe_path() in openssh source code (misc.c). +def check_permissions(username, current_path, full_path, is_file, strictmodes): + """Check if the file/folder in @current_path has the right permissions. + + We need to check that: + 1. If StrictMode is enabled, the owner is either root or the user + 2. the user can access the file/folder, otherwise ssh won't use it + 3. If StrictMode is enabled, no write permission is given to group + and world users (022) + """ + + # group/world can only execute the folder (access) + minimal_permissions = 0o711 + if is_file: + # group/world can only read the file + minimal_permissions = 0o644 + + # 1. owner must be either root or the user itself + owner = util.get_owner(current_path) + if strictmodes and owner != username and owner != "root": + LOG.debug("Path %s in %s must be own by user %s or" + " by root, but instead is own by %s. Ignoring key.", + current_path, full_path, username, owner) + return False + + parent_permission = util.get_permissions(current_path) + # 2. the user can access the file/folder, otherwise ssh won't use it + if owner == username: + # need only the owner permissions + minimal_permissions &= 0o700 + else: + group_owner = util.get_group(current_path) + user_groups = util.get_user_groups(username) + + if group_owner in user_groups: + # need only the group permissions + minimal_permissions &= 0o070 + else: + # need only the world permissions + minimal_permissions &= 0o007 + + if parent_permission & minimal_permissions == 0: + LOG.debug("Path %s in %s must be accessible by user %s," + " check its permissions", + current_path, full_path, username) + return False + + # 3. no write permission (w) is given to group and world users (022) + # Group and world user can still have +rx. + if strictmodes and parent_permission & 0o022 != 0: + LOG.debug("Path %s in %s must not give write" + "permission to group or world users. Ignoring key.", + current_path, full_path) + return False + + return True + + +def check_create_path(username, filename, strictmodes): + user_pwent = users_ssh_info(username)[1] + root_pwent = users_ssh_info("root")[1] + try: + # check the directories first + directories = filename.split("/")[1:-1] + + # scan in order, from root to file name + parent_folder = "" + # this is to comply also with unit tests, and + # strange home directories + home_folder = os.path.dirname(user_pwent.pw_dir) + for directory in directories: + parent_folder += "/" + directory + if home_folder.startswith(parent_folder): + continue + + if not os.path.isdir(parent_folder): + # directory does not exist, and permission so far are good: + # create the directory, and make it accessible by everyone + # but owned by root, as it might be used by many users. 
+ with util.SeLinuxGuard(parent_folder): + os.makedirs(parent_folder, mode=0o755, exist_ok=True) + util.chownbyid(parent_folder, root_pwent.pw_uid, + root_pwent.pw_gid) + + permissions = check_permissions(username, parent_folder, + filename, False, strictmodes) + if not permissions: + return False + + # check the file + if not os.path.exists(filename): + # if file does not exist: we need to create it, since the + # folders at this point exist and have right permissions + util.write_file(filename, '', mode=0o600, ensure_dir_exists=True) + util.chownbyid(filename, user_pwent.pw_uid, user_pwent.pw_gid) + + permissions = check_permissions(username, filename, + filename, True, strictmodes) + if not permissions: + return False + except (IOError, OSError) as e: + util.logexc(LOG, str(e)) + return False + + return True + + def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): (ssh_dir, pw_ent) = users_ssh_info(username) default_authorizedkeys_file = os.path.join(ssh_dir, 'authorized_keys') @@ -259,6 +366,7 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): ssh_cfg = parse_ssh_config_map(sshd_cfg_file) key_paths = ssh_cfg.get("authorizedkeysfile", "%h/.ssh/authorized_keys") + strictmodes = ssh_cfg.get("strictmodes", "yes") auth_key_fns = render_authorizedkeysfile_paths( key_paths, pw_ent.pw_dir, username) @@ -269,31 +377,31 @@ def extract_authorized_keys(username, sshd_cfg_file=DEF_SSHD_CFG): "config from %r, using 'AuthorizedKeysFile' file " "%r instead", DEF_SSHD_CFG, auth_key_fns[0]) - # check if one of the keys is the user's one + # check if one of the keys is the user's one and has the right permissions for key_path, auth_key_fn in zip(key_paths.split(), auth_key_fns): if any([ '%u' in key_path, '%h' in key_path, auth_key_fn.startswith('{}/'.format(pw_ent.pw_dir)) ]): - user_authorizedkeys_file = auth_key_fn + permissions_ok = check_create_path(username, auth_key_fn, + strictmodes == "yes") + if permissions_ok: + user_authorizedkeys_file = auth_key_fn + break if user_authorizedkeys_file != default_authorizedkeys_file: LOG.debug( "AuthorizedKeysFile has an user-specific authorized_keys, " "using %s", user_authorizedkeys_file) - # always store all the keys in the user's private file - return (user_authorizedkeys_file, parse_authorized_keys(auth_key_fns)) + return ( + user_authorizedkeys_file, + parse_authorized_keys([user_authorizedkeys_file]) + ) def setup_user_keys(keys, username, options=None): - # Make sure the users .ssh dir is setup accordingly - (ssh_dir, pwent) = users_ssh_info(username) - if not os.path.isdir(ssh_dir): - util.ensure_dir(ssh_dir, mode=0o700) - util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) - # Turn the 'update' keys given into actual entries parser = AuthKeyLineParser() key_entries = [] @@ -302,11 +410,10 @@ def setup_user_keys(keys, username, options=None): # Extract the old and make the new (auth_key_fn, auth_key_entries) = extract_authorized_keys(username) + ssh_dir = os.path.dirname(auth_key_fn) with util.SeLinuxGuard(ssh_dir, recursive=True): content = update_authorized_keys(auth_key_entries, key_entries) - util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700) - util.write_file(auth_key_fn, content, mode=0o600) - util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid) + util.write_file(auth_key_fn, content, preserve_mode=True) class SshdConfigLine(object): diff --git a/cloudinit/util.py b/cloudinit/util.py index d3ced463..c53f6453 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -35,6 +35,7 @@ from base64 import 
b64decode, b64encode from errno import ENOENT from functools import lru_cache from urllib import parse +from typing import List from cloudinit import importer from cloudinit import log as logging @@ -1878,6 +1879,53 @@ def chmod(path, mode): os.chmod(path, real_mode) +def get_permissions(path: str) -> int: + """ + Returns the octal permissions of the file/folder pointed by the path, + encoded as an int. + + @param path: The full path of the file/folder. + """ + + return stat.S_IMODE(os.stat(path).st_mode) + + +def get_owner(path: str) -> str: + """ + Returns the owner of the file/folder pointed by the path. + + @param path: The full path of the file/folder. + """ + st = os.stat(path) + return pwd.getpwuid(st.st_uid).pw_name + + +def get_group(path: str) -> str: + """ + Returns the group of the file/folder pointed by the path. + + @param path: The full path of the file/folder. + """ + st = os.stat(path) + return grp.getgrgid(st.st_gid).gr_name + + +def get_user_groups(username: str) -> List[str]: + """ + Returns a list of all groups to which the user belongs + + @param username: the user we want to check + """ + groups = [] + for group in grp.getgrall(): + if username in group.gr_mem: + groups.append(group.gr_name) + + gid = pwd.getpwnam(username).pw_gid + groups.append(grp.getgrgid(gid).gr_name) + return groups + + def write_file( filename, content, @@ -1904,8 +1952,7 @@ def write_file( if preserve_mode: try: - file_stat = os.stat(filename) - mode = stat.S_IMODE(file_stat.st_mode) + mode = get_permissions(filename) except OSError: pass diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index bcb8044f..a66788bf 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -1,6 +1,9 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+import os + from collections import namedtuple +from functools import partial from unittest.mock import patch from cloudinit import ssh_util @@ -8,13 +11,48 @@ from cloudinit.tests import helpers as test_helpers from cloudinit import util # https://stackoverflow.com/questions/11351032/ -FakePwEnt = namedtuple( - 'FakePwEnt', - ['pw_dir', 'pw_gecos', 'pw_name', 'pw_passwd', 'pw_shell', 'pwd_uid']) +FakePwEnt = namedtuple('FakePwEnt', [ + 'pw_name', + 'pw_passwd', + 'pw_uid', + 'pw_gid', + 'pw_gecos', + 'pw_dir', + 'pw_shell', +]) FakePwEnt.__new__.__defaults__ = tuple( "UNSET_%s" % n for n in FakePwEnt._fields) +def mock_get_owner(updated_permissions, value): + try: + return updated_permissions[value][0] + except ValueError: + return util.get_owner(value) + + +def mock_get_group(updated_permissions, value): + try: + return updated_permissions[value][1] + except ValueError: + return util.get_group(value) + + +def mock_get_user_groups(username): + return username + + +def mock_get_permissions(updated_permissions, value): + try: + return updated_permissions[value][2] + except ValueError: + return util.get_permissions(value) + + +def mock_getpwnam(users, username): + return users[username] + + # Do not use these public keys, most of them are fetched from # the testdata for OpenSSH, and their private keys are available # https://github.com/openssh/openssh-portable/tree/master/regress/unittests/sshkey/testdata @@ -552,12 +590,30 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): ssh_util.render_authorizedkeysfile_paths( "/opt/%u/keys", "/home/bobby", "bobby")) + def test_user_file(self): + self.assertEqual( + ["/opt/bobby"], + ssh_util.render_authorizedkeysfile_paths( + "/opt/%u", "/home/bobby", "bobby")) + + def test_user_file2(self): + self.assertEqual( + ["/opt/bobby/bobby"], + ssh_util.render_authorizedkeysfile_paths( + "/opt/%u/%u", "/home/bobby", "bobby")) + def test_multiple(self): self.assertEqual( ["/keys/path1", "/keys/path2"], ssh_util.render_authorizedkeysfile_paths( "/keys/path1 /keys/path2", "/home/bobby", "bobby")) + def test_multiple2(self): + self.assertEqual( + ["/keys/path1", "/keys/bobby"], + ssh_util.render_authorizedkeysfile_paths( + "/keys/path1 /keys/%u", "/home/bobby", "bobby")) + def test_relative(self): self.assertEqual( ["/home/bobby/.secret/keys"], @@ -581,269 +637,763 @@ class TestBasicAuthorizedKeyParse(test_helpers.CiTestCase): class TestMultipleSshAuthorizedKeysFile(test_helpers.CiTestCase): - @patch("cloudinit.ssh_util.pwd.getpwnam") - def test_multiple_authorizedkeys_file_order1(self, m_getpwnam): - fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') - m_getpwnam.return_value = fpw - user_ssh_folder = "%s/.ssh" % fpw.pw_dir - - # /tmp/home2/bobby/.ssh/authorized_keys = rsa - authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) - util.write_file(authorized_keys, VALID_CONTENT['rsa']) - - # /tmp/home2/bobby/.ssh/user_keys = dsa - user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) - util.write_file(user_keys, VALID_CONTENT['dsa']) - - # /tmp/sshd_config + def create_fake_users(self, names, mock_permissions, + m_get_group, m_get_owner, m_get_permissions, + m_getpwnam, users): + homes = [] + + root = '/tmp/root' + fpw = FakePwEnt(pw_name="root", pw_dir=root) + users["root"] = fpw + + for name in names: + home = '/tmp/home/' + name + fpw = FakePwEnt(pw_name=name, pw_dir=home) + users[name] = fpw + homes.append(home) + + m_get_permissions.side_effect = partial( + mock_get_permissions, mock_permissions) + 
m_get_owner.side_effect = partial(mock_get_owner, mock_permissions) + m_get_group.side_effect = partial(mock_get_group, mock_permissions) + m_getpwnam.side_effect = partial(mock_getpwnam, users) + return homes + + def create_user_authorized_file(self, home, filename, content_key, keys): + user_ssh_folder = "%s/.ssh" % home + # /tmp/home//.ssh/authorized_keys = content_key + authorized_keys = self.tmp_path(filename, dir=user_ssh_folder) + util.write_file(authorized_keys, VALID_CONTENT[content_key]) + keys[authorized_keys] = content_key + return authorized_keys + + def create_global_authorized_file(self, filename, content_key, keys): + authorized_keys = self.tmp_path(filename, dir='/tmp') + util.write_file(authorized_keys, VALID_CONTENT[content_key]) + keys[authorized_keys] = content_key + return authorized_keys + + def create_sshd_config(self, authorized_keys_files): sshd_config = self.tmp_path('sshd_config', dir="/tmp") util.write_file( sshd_config, - "AuthorizedKeysFile %s %s" % (authorized_keys, user_keys) + "AuthorizedKeysFile " + authorized_keys_files ) + return sshd_config + def execute_and_check(self, user, sshd_config, solution, keys, + delete_keys=True): (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config) + user, sshd_config) content = ssh_util.update_authorized_keys(auth_key_entries, []) - self.assertEqual(user_keys, auth_key_fn) - self.assertTrue(VALID_CONTENT['rsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) + self.assertEqual(auth_key_fn, solution) + for path, key in keys.items(): + if path == solution: + self.assertTrue(VALID_CONTENT[key] in content) + else: + self.assertFalse(VALID_CONTENT[key] in content) + + if delete_keys and os.path.isdir("/tmp/home/"): + util.delete_dir_contents("/tmp/home/") @patch("cloudinit.ssh_util.pwd.getpwnam") - def test_multiple_authorizedkeys_file_order2(self, m_getpwnam): - fpw = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') - m_getpwnam.return_value = fpw - user_ssh_folder = "%s/.ssh" % fpw.pw_dir + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_single_user_two_local_files( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam + ): + user_bobby = 'bobby' + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + } + + homes = self.create_fake_users( + [user_bobby], mock_permissions, m_get_group, m_get_owner, + m_get_permissions, m_getpwnam, users + ) + home = homes[0] - # /tmp/home/suzie/.ssh/authorized_keys = rsa - authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) - util.write_file(authorized_keys, VALID_CONTENT['rsa']) + # /tmp/home/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.create_user_authorized_file( + home, 'authorized_keys', 'rsa', keys + ) - # /tmp/home/suzie/.ssh/user_keys = dsa - user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) - util.write_file(user_keys, VALID_CONTENT['dsa']) + # /tmp/home/bobby/.ssh/user_keys = dsa + user_keys = self.create_user_authorized_file( + home, 'user_keys', 'dsa', keys + ) # /tmp/sshd_config - sshd_config = self.tmp_path('sshd_config', dir="/tmp") - util.write_file( - sshd_config, - "AuthorizedKeysFile %s %s" % (user_keys, authorized_keys) + options = "%s %s" % (authorized_keys, 
user_keys) + sshd_config = self.create_sshd_config(options) + + self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_single_user_two_local_files_inverted( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam + ): + user_bobby = 'bobby' + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + } + + homes = self.create_fake_users( + [user_bobby], mock_permissions, m_get_group, m_get_owner, + m_get_permissions, m_getpwnam, users ) + home = homes[0] - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) + # /tmp/home/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.create_user_authorized_file( + home, 'authorized_keys', 'rsa', keys + ) - self.assertEqual(authorized_keys, auth_key_fn) - self.assertTrue(VALID_CONTENT['rsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) + # /tmp/home/bobby/.ssh/user_keys = dsa + user_keys = self.create_user_authorized_file( + home, 'user_keys', 'dsa', keys + ) - @patch("cloudinit.ssh_util.pwd.getpwnam") - def test_multiple_authorizedkeys_file_local_global(self, m_getpwnam): - fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') - m_getpwnam.return_value = fpw - user_ssh_folder = "%s/.ssh" % fpw.pw_dir + # /tmp/sshd_config + options = "%s %s" % (user_keys, authorized_keys) + sshd_config = self.create_sshd_config(options) - # /tmp/home2/bobby/.ssh/authorized_keys = rsa - authorized_keys = self.tmp_path('authorized_keys', dir=user_ssh_folder) - util.write_file(authorized_keys, VALID_CONTENT['rsa']) + self.execute_and_check(user_bobby, sshd_config, user_keys, keys) - # /tmp/home2/bobby/.ssh/user_keys = dsa - user_keys = self.tmp_path('user_keys', dir=user_ssh_folder) - util.write_file(user_keys, VALID_CONTENT['dsa']) + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_single_user_local_global_files( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam + ): + user_bobby = 'bobby' + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/user_keys': ('bobby', 'bobby', 0o600), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + } + + homes = self.create_fake_users( + [user_bobby], mock_permissions, m_get_group, m_get_owner, + m_get_permissions, m_getpwnam, users + ) + home = homes[0] - # /tmp/etc/ssh/authorized_keys = ecdsa - authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', - dir="/tmp") - util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + # /tmp/home/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.create_user_authorized_file( + home, 'authorized_keys', 'rsa', keys + ) - # /tmp/sshd_config - sshd_config = self.tmp_path('sshd_config', dir="/tmp") - util.write_file( - sshd_config, - "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, - user_keys, authorized_keys) + # /tmp/home/bobby/.ssh/user_keys = dsa + 
user_keys = self.create_user_authorized_file( + home, 'user_keys', 'dsa', keys ) - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) + authorized_keys_global = self.create_global_authorized_file( + 'etc/ssh/authorized_keys', 'ecdsa', keys + ) - self.assertEqual(authorized_keys, auth_key_fn) - self.assertTrue(VALID_CONTENT['rsa'] in content) - self.assertTrue(VALID_CONTENT['ecdsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) + options = "%s %s %s" % (authorized_keys_global, user_keys, + authorized_keys) + sshd_config = self.create_sshd_config(options) - @patch("cloudinit.ssh_util.pwd.getpwnam") - def test_multiple_authorizedkeys_file_local_global2(self, m_getpwnam): - fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') - m_getpwnam.return_value = fpw - user_ssh_folder = "%s/.ssh" % fpw.pw_dir + self.execute_and_check(user_bobby, sshd_config, user_keys, keys) - # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa - authorized_keys = self.tmp_path('authorized_keys2', - dir=user_ssh_folder) - util.write_file(authorized_keys, VALID_CONTENT['rsa']) + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_single_user_local_global_files_inverted( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam + ): + user_bobby = 'bobby' + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), + '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), + } + + homes = self.create_fake_users( + [user_bobby], mock_permissions, m_get_group, m_get_owner, + m_get_permissions, m_getpwnam, users + ) + home = homes[0] - # /tmp/home2/bobby/.ssh/user_keys3 = dsa - user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) - util.write_file(user_keys, VALID_CONTENT['dsa']) + # /tmp/home/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.create_user_authorized_file( + home, 'authorized_keys2', 'rsa', keys + ) - # /tmp/etc/ssh/authorized_keys = ecdsa - authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', - dir="/tmp") - util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + # /tmp/home/bobby/.ssh/user_keys = dsa + user_keys = self.create_user_authorized_file( + home, 'user_keys3', 'dsa', keys + ) - # /tmp/sshd_config - sshd_config = self.tmp_path('sshd_config', dir="/tmp") - util.write_file( - sshd_config, - "AuthorizedKeysFile %s %s %s" % (authorized_keys_global, - authorized_keys, user_keys) + authorized_keys_global = self.create_global_authorized_file( + 'etc/ssh/authorized_keys', 'ecdsa', keys ) - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) + options = "%s %s %s" % (authorized_keys_global, authorized_keys, + user_keys) + sshd_config = self.create_sshd_config(options) - self.assertEqual(user_keys, auth_key_fn) - self.assertTrue(VALID_CONTENT['rsa'] in content) - self.assertTrue(VALID_CONTENT['ecdsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) + self.execute_and_check(user_bobby, sshd_config, authorized_keys, keys) @patch("cloudinit.ssh_util.pwd.getpwnam") - def test_multiple_authorizedkeys_file_global(self, m_getpwnam): - fpw = 
FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') - m_getpwnam.return_value = fpw + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_single_user_global_file( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam + ): + user_bobby = 'bobby' + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + } + + homes = self.create_fake_users( + [user_bobby], mock_permissions, m_get_group, m_get_owner, + m_get_permissions, m_getpwnam, users + ) + home = homes[0] # /tmp/etc/ssh/authorized_keys = rsa - authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys', - dir="/tmp") - util.write_file(authorized_keys_global, VALID_CONTENT['rsa']) + authorized_keys_global = self.create_global_authorized_file( + 'etc/ssh/authorized_keys', 'rsa', keys + ) - # /tmp/sshd_config - sshd_config = self.tmp_path('sshd_config') - util.write_file( - sshd_config, - "AuthorizedKeysFile %s" % (authorized_keys_global) + options = "%s" % authorized_keys_global + sshd_config = self.create_sshd_config(options) + + default = "%s/.ssh/authorized_keys" % home + self.execute_and_check(user_bobby, sshd_config, default, keys) + + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_two_users_local_file_standard( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam + ): + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + '/tmp/home/suzie': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), + } + + user_bobby = 'bobby' + user_suzie = 'suzie' + homes = self.create_fake_users( + [user_bobby, user_suzie], mock_permissions, m_get_group, + m_get_owner, m_get_permissions, m_getpwnam, users ) + home_bobby = homes[0] + home_suzie = homes[1] - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) + # /tmp/home/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.create_user_authorized_file( + home_bobby, 'authorized_keys', 'rsa', keys + ) - self.assertEqual("%s/.ssh/authorized_keys" % fpw.pw_dir, auth_key_fn) - self.assertTrue(VALID_CONTENT['rsa'] in content) + # /tmp/home/suzie/.ssh/authorized_keys = rsa + authorized_keys2 = self.create_user_authorized_file( + home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys + ) + + options = ".ssh/authorized_keys" + sshd_config = self.create_sshd_config(options) + + self.execute_and_check( + user_bobby, sshd_config, authorized_keys, keys, delete_keys=False + ) + self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) @patch("cloudinit.ssh_util.pwd.getpwnam") - def test_multiple_authorizedkeys_file_multiuser(self, m_getpwnam): - fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home2/bobby') - m_getpwnam.return_value = fpw - user_ssh_folder = "%s/.ssh" % fpw.pw_dir - # /tmp/home2/bobby/.ssh/authorized_keys2 = rsa - authorized_keys = self.tmp_path('authorized_keys2', - dir=user_ssh_folder) - util.write_file(authorized_keys, 
VALID_CONTENT['rsa']) - # /tmp/home2/bobby/.ssh/user_keys3 = dsa - user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) - util.write_file(user_keys, VALID_CONTENT['dsa']) - - fpw2 = FakePwEnt(pw_name='suzie', pw_dir='/tmp/home/suzie') - user_ssh_folder = "%s/.ssh" % fpw2.pw_dir - # /tmp/home/suzie/.ssh/authorized_keys2 = ssh-xmss@openssh.com - authorized_keys2 = self.tmp_path('authorized_keys2', - dir=user_ssh_folder) - util.write_file(authorized_keys2, - VALID_CONTENT['ssh-xmss@openssh.com']) + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_two_users_local_file_custom( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam + ): + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), + '/tmp/home/suzie': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600), + } + + user_bobby = 'bobby' + user_suzie = 'suzie' + homes = self.create_fake_users( + [user_bobby, user_suzie], mock_permissions, m_get_group, + m_get_owner, m_get_permissions, m_getpwnam, users + ) + home_bobby = homes[0] + home_suzie = homes[1] - # /tmp/etc/ssh/authorized_keys = ecdsa - authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', - dir="/tmp") - util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + # /tmp/home/bobby/.ssh/authorized_keys2 = rsa + authorized_keys = self.create_user_authorized_file( + home_bobby, 'authorized_keys2', 'rsa', keys + ) - # /tmp/sshd_config - sshd_config = self.tmp_path('sshd_config', dir="/tmp") - util.write_file( - sshd_config, - "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s" % - (authorized_keys_global, user_keys) + # /tmp/home/suzie/.ssh/authorized_keys2 = rsa + authorized_keys2 = self.create_user_authorized_file( + home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys ) - # process first user - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) + options = ".ssh/authorized_keys2" + sshd_config = self.create_sshd_config(options) + + self.execute_and_check( + user_bobby, sshd_config, authorized_keys, keys, delete_keys=False + ) + self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) - self.assertEqual(user_keys, auth_key_fn) - self.assertTrue(VALID_CONTENT['rsa'] in content) - self.assertTrue(VALID_CONTENT['ecdsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) - self.assertFalse(VALID_CONTENT['ssh-xmss@openssh.com'] in content) + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_two_users_local_global_files( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam + ): + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), + '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), + '/tmp/home/suzie': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh/authorized_keys2': ('suzie', 'suzie', 0o600), + 
'/tmp/home/suzie/.ssh/user_keys3': ('suzie', 'suzie', 0o600), + } + + user_bobby = 'bobby' + user_suzie = 'suzie' + homes = self.create_fake_users( + [user_bobby, user_suzie], mock_permissions, m_get_group, + m_get_owner, m_get_permissions, m_getpwnam, users + ) + home_bobby = homes[0] + home_suzie = homes[1] - m_getpwnam.return_value = fpw2 - # process second user - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw2.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) + # /tmp/home/bobby/.ssh/authorized_keys2 = rsa + self.create_user_authorized_file( + home_bobby, 'authorized_keys2', 'rsa', keys + ) + # /tmp/home/bobby/.ssh/user_keys3 = dsa + user_keys = self.create_user_authorized_file( + home_bobby, 'user_keys3', 'dsa', keys + ) + + # /tmp/home/suzie/.ssh/authorized_keys2 = rsa + authorized_keys2 = self.create_user_authorized_file( + home_suzie, 'authorized_keys2', 'ssh-xmss@openssh.com', keys + ) + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.create_global_authorized_file( + 'etc/ssh/authorized_keys2', 'ecdsa', keys + ) + + options = "%s %s %%h/.ssh/authorized_keys2" % \ + (authorized_keys_global, user_keys) + sshd_config = self.create_sshd_config(options) - self.assertEqual(authorized_keys2, auth_key_fn) - self.assertTrue(VALID_CONTENT['ssh-xmss@openssh.com'] in content) - self.assertTrue(VALID_CONTENT['ecdsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) - self.assertFalse(VALID_CONTENT['rsa'] in content) + self.execute_and_check( + user_bobby, sshd_config, user_keys, keys, delete_keys=False + ) + self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) + @patch("cloudinit.util.get_user_groups") @patch("cloudinit.ssh_util.pwd.getpwnam") - def test_multiple_authorizedkeys_file_multiuser2(self, m_getpwnam): - fpw = FakePwEnt(pw_name='bobby', pw_dir='/tmp/home/bobby') - m_getpwnam.return_value = fpw - user_ssh_folder = "%s/.ssh" % fpw.pw_dir + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_two_users_local_global_files_badguy( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, + m_get_user_groups + ): + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys2': ('bobby', 'bobby', 0o600), + '/tmp/home/bobby/.ssh/user_keys3': ('bobby', 'bobby', 0o600), + '/tmp/home/badguy': ('root', 'root', 0o755), + '/tmp/home/badguy/home': ('root', 'root', 0o755), + '/tmp/home/badguy/home/bobby': ('root', 'root', 0o655), + } + + user_bobby = 'bobby' + user_badguy = 'badguy' + home_bobby, *_ = self.create_fake_users( + [user_bobby, user_badguy], mock_permissions, m_get_group, + m_get_owner, m_get_permissions, m_getpwnam, users + ) + m_get_user_groups.side_effect = mock_get_user_groups + # /tmp/home/bobby/.ssh/authorized_keys2 = rsa - authorized_keys = self.tmp_path('authorized_keys2', - dir=user_ssh_folder) - util.write_file(authorized_keys, VALID_CONTENT['rsa']) + authorized_keys = self.create_user_authorized_file( + home_bobby, 'authorized_keys2', 'rsa', keys + ) # /tmp/home/bobby/.ssh/user_keys3 = dsa - user_keys = self.tmp_path('user_keys3', dir=user_ssh_folder) - util.write_file(user_keys, VALID_CONTENT['dsa']) + user_keys = self.create_user_authorized_file( + home_bobby, 'user_keys3', 'dsa', keys + ) - fpw2 = FakePwEnt(pw_name='badguy', 
pw_dir='/tmp/home/badguy') - user_ssh_folder = "%s/.ssh" % fpw2.pw_dir # /tmp/home/badguy/home/bobby = "" authorized_keys2 = self.tmp_path('home/bobby', dir="/tmp/home/badguy") + util.write_file(authorized_keys2, '') # /tmp/etc/ssh/authorized_keys = ecdsa - authorized_keys_global = self.tmp_path('etc/ssh/authorized_keys2', - dir="/tmp") - util.write_file(authorized_keys_global, VALID_CONTENT['ecdsa']) + authorized_keys_global = self.create_global_authorized_file( + 'etc/ssh/authorized_keys2', 'ecdsa', keys + ) # /tmp/sshd_config - sshd_config = self.tmp_path('sshd_config', dir="/tmp") - util.write_file( - sshd_config, - "AuthorizedKeysFile %s %%h/.ssh/authorized_keys2 %s %s" % - (authorized_keys_global, user_keys, authorized_keys2) + options = "%s %%h/.ssh/authorized_keys2 %s %s" % \ + (authorized_keys2, authorized_keys_global, user_keys) + sshd_config = self.create_sshd_config(options) + + self.execute_and_check( + user_bobby, sshd_config, authorized_keys, keys, delete_keys=False + ) + self.execute_and_check( + user_badguy, sshd_config, authorized_keys2, keys ) - # process first user - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) + @patch("cloudinit.util.get_user_groups") + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_two_users_unaccessible_file( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, + m_get_user_groups + ): + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + + '/tmp/etc': ('root', 'root', 0o755), + '/tmp/etc/ssh': ('root', 'root', 0o755), + '/tmp/etc/ssh/userkeys': ('root', 'root', 0o700), + '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600), + '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600), + + '/tmp/home/badguy': ('badguy', 'badguy', 0o700), + '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700), + '/tmp/home/badguy/.ssh/authorized_keys': + ('badguy', 'badguy', 0o600), + } + + user_bobby = 'bobby' + user_badguy = 'badguy' + homes = self.create_fake_users( + [user_bobby, user_badguy], mock_permissions, m_get_group, + m_get_owner, m_get_permissions, m_getpwnam, users + ) + m_get_user_groups.side_effect = mock_get_user_groups + home_bobby = homes[0] + home_badguy = homes[1] - self.assertEqual(user_keys, auth_key_fn) - self.assertTrue(VALID_CONTENT['rsa'] in content) - self.assertTrue(VALID_CONTENT['ecdsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) + # /tmp/home/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.create_user_authorized_file( + home_bobby, 'authorized_keys', 'rsa', keys + ) + # /tmp/etc/ssh/userkeys/bobby = dsa + # assume here that we can bypass userkeys, despite permissions + self.create_global_authorized_file( + 'etc/ssh/userkeys/bobby', 'dsa', keys + ) - m_getpwnam.return_value = fpw2 - # process second user - (auth_key_fn, auth_key_entries) = ssh_util.extract_authorized_keys( - fpw2.pw_name, sshd_config) - content = ssh_util.update_authorized_keys(auth_key_entries, []) + # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com + authorized_keys2 = self.create_user_authorized_file( + home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys + ) - # badguy should not take the key from the 
other user! - self.assertEqual(authorized_keys2, auth_key_fn) - self.assertTrue(VALID_CONTENT['ecdsa'] in content) - self.assertTrue(VALID_CONTENT['dsa'] in content) - self.assertFalse(VALID_CONTENT['rsa'] in content) + # /tmp/etc/ssh/userkeys/badguy = ecdsa + self.create_global_authorized_file( + 'etc/ssh/userkeys/badguy', 'ecdsa', keys + ) + + # /tmp/sshd_config + options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys" + sshd_config = self.create_sshd_config(options) + + self.execute_and_check( + user_bobby, sshd_config, authorized_keys, keys, delete_keys=False + ) + self.execute_and_check( + user_badguy, sshd_config, authorized_keys2, keys + ) + + @patch("cloudinit.util.get_user_groups") + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_two_users_accessible_file( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, + m_get_user_groups + ): + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + + '/tmp/etc': ('root', 'root', 0o755), + '/tmp/etc/ssh': ('root', 'root', 0o755), + '/tmp/etc/ssh/userkeys': ('root', 'root', 0o755), + '/tmp/etc/ssh/userkeys/bobby': ('bobby', 'bobby', 0o600), + '/tmp/etc/ssh/userkeys/badguy': ('badguy', 'badguy', 0o600), + + '/tmp/home/badguy': ('badguy', 'badguy', 0o700), + '/tmp/home/badguy/.ssh': ('badguy', 'badguy', 0o700), + '/tmp/home/badguy/.ssh/authorized_keys': + ('badguy', 'badguy', 0o600), + } + + user_bobby = 'bobby' + user_badguy = 'badguy' + homes = self.create_fake_users( + [user_bobby, user_badguy], mock_permissions, m_get_group, + m_get_owner, m_get_permissions, m_getpwnam, users + ) + m_get_user_groups.side_effect = mock_get_user_groups + home_bobby = homes[0] + home_badguy = homes[1] + + # /tmp/home/bobby/.ssh/authorized_keys = rsa + self.create_user_authorized_file( + home_bobby, 'authorized_keys', 'rsa', keys + ) + # /tmp/etc/ssh/userkeys/bobby = dsa + # assume here that we can bypass userkeys, despite permissions + authorized_keys = self.create_global_authorized_file( + 'etc/ssh/userkeys/bobby', 'dsa', keys + ) + + # /tmp/home/badguy/.ssh/authorized_keys = ssh-xmss@openssh.com + self.create_user_authorized_file( + home_badguy, 'authorized_keys', 'ssh-xmss@openssh.com', keys + ) + + # /tmp/etc/ssh/userkeys/badguy = ecdsa + authorized_keys2 = self.create_global_authorized_file( + 'etc/ssh/userkeys/badguy', 'ecdsa', keys + ) + + # /tmp/sshd_config + options = "/tmp/etc/ssh/userkeys/%u .ssh/authorized_keys" + sshd_config = self.create_sshd_config(options) + + self.execute_and_check( + user_bobby, sshd_config, authorized_keys, keys, delete_keys=False + ) + self.execute_and_check( + user_badguy, sshd_config, authorized_keys2, keys + ) + + @patch("cloudinit.util.get_user_groups") + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_two_users_hardcoded_single_user_file( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, + m_get_user_groups + ): + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + + '/tmp/home/suzie': ('suzie', 'suzie', 0o700), + 
'/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), + } + + user_bobby = 'bobby' + user_suzie = 'suzie' + homes = self.create_fake_users( + [user_bobby, user_suzie], mock_permissions, m_get_group, + m_get_owner, m_get_permissions, m_getpwnam, users + ) + home_bobby = homes[0] + home_suzie = homes[1] + m_get_user_groups.side_effect = mock_get_user_groups + + # /tmp/home/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.create_user_authorized_file( + home_bobby, 'authorized_keys', 'rsa', keys + ) + + # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com + self.create_user_authorized_file( + home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys + ) + + # /tmp/sshd_config + options = "%s" % (authorized_keys) + sshd_config = self.create_sshd_config(options) + + self.execute_and_check( + user_bobby, sshd_config, authorized_keys, keys, delete_keys=False + ) + default = "%s/.ssh/authorized_keys" % home_suzie + self.execute_and_check(user_suzie, sshd_config, default, keys) + + @patch("cloudinit.util.get_user_groups") + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_two_users_hardcoded_single_user_file_inverted( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, + m_get_user_groups + ): + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + + '/tmp/home/suzie': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), + } + + user_bobby = 'bobby' + user_suzie = 'suzie' + homes = self.create_fake_users( + [user_bobby, user_suzie], mock_permissions, m_get_group, + m_get_owner, m_get_permissions, m_getpwnam, users + ) + home_bobby = homes[0] + home_suzie = homes[1] + m_get_user_groups.side_effect = mock_get_user_groups + + # /tmp/home/bobby/.ssh/authorized_keys = rsa + self.create_user_authorized_file( + home_bobby, 'authorized_keys', 'rsa', keys + ) + + # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com + authorized_keys2 = self.create_user_authorized_file( + home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys + ) + + # /tmp/sshd_config + options = "%s" % (authorized_keys2) + sshd_config = self.create_sshd_config(options) + + default = "%s/.ssh/authorized_keys" % home_bobby + self.execute_and_check( + user_bobby, sshd_config, default, keys, delete_keys=False + ) + self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) + + @patch("cloudinit.util.get_user_groups") + @patch("cloudinit.ssh_util.pwd.getpwnam") + @patch("cloudinit.util.get_permissions") + @patch("cloudinit.util.get_owner") + @patch("cloudinit.util.get_group") + def test_two_users_hardcoded_user_files( + self, m_get_group, m_get_owner, m_get_permissions, m_getpwnam, + m_get_user_groups + ): + keys = {} + users = {} + mock_permissions = { + '/tmp/home/bobby': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh': ('bobby', 'bobby', 0o700), + '/tmp/home/bobby/.ssh/authorized_keys': ('bobby', 'bobby', 0o600), + + '/tmp/home/suzie': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh': ('suzie', 'suzie', 0o700), + '/tmp/home/suzie/.ssh/authorized_keys': ('suzie', 'suzie', 0o600), + } + + user_bobby = 'bobby' + user_suzie = 'suzie' + homes 
= self.create_fake_users( + [user_bobby, user_suzie], mock_permissions, m_get_group, + m_get_owner, m_get_permissions, m_getpwnam, users + ) + home_bobby = homes[0] + home_suzie = homes[1] + m_get_user_groups.side_effect = mock_get_user_groups + + # /tmp/home/bobby/.ssh/authorized_keys = rsa + authorized_keys = self.create_user_authorized_file( + home_bobby, 'authorized_keys', 'rsa', keys + ) + + # /tmp/home/suzie/.ssh/authorized_keys = ssh-xmss@openssh.com + authorized_keys2 = self.create_user_authorized_file( + home_suzie, 'authorized_keys', 'ssh-xmss@openssh.com', keys + ) + + # /tmp/etc/ssh/authorized_keys = ecdsa + authorized_keys_global = self.create_global_authorized_file( + 'etc/ssh/authorized_keys', 'ecdsa', keys + ) + + # /tmp/sshd_config + options = "%s %s %s" % \ + (authorized_keys_global, authorized_keys, authorized_keys2) + sshd_config = self.create_sshd_config(options) + + self.execute_and_check( + user_bobby, sshd_config, authorized_keys, keys, delete_keys=False + ) + self.execute_and_check(user_suzie, sshd_config, authorized_keys2, keys) # vi: ts=4 expandtab -- cgit v1.2.3 From 049d62b658b06e729291def6b7b6f9520827d0ba Mon Sep 17 00:00:00 2001 From: sshedi <53473811+sshedi@users.noreply.github.com> Date: Mon, 9 Aug 2021 22:19:13 +0530 Subject: photon: refactor hostname handling and add networkd activator (#958) --- cloudinit/distros/photon.py | 56 ++++++++++------------ cloudinit/net/activators.py | 27 +++++++++++ cloudinit/net/networkd.py | 2 +- tests/unittests/test_distros/test_photon.py | 23 +++++++-- .../test_handler/test_handler_set_hostname.py | 26 +++++++--- tests/unittests/test_net_activators.py | 27 ++++++++++- 6 files changed, 117 insertions(+), 44 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py index 61e270c0..4ff90ea6 100644 --- a/cloudinit/distros/photon.py +++ b/cloudinit/distros/photon.py @@ -13,13 +13,12 @@ from cloudinit import helpers from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit.distros import rhel_util as rhutil -from cloudinit.distros.parsers.hostname import HostnameConf LOG = logging.getLogger(__name__) class Distro(distros.Distro): - hostname_conf_fn = '/etc/hostname' + systemd_hostname_conf_fn = '/etc/hostname' network_conf_dir = '/etc/systemd/network/' systemd_locale_conf_fn = '/etc/locale.conf' resolve_conf_fn = '/etc/systemd/resolved.conf' @@ -43,17 +42,18 @@ class Distro(distros.Distro): self.osfamily = 'photon' self.init_cmd = ['systemctl'] - def exec_cmd(self, cmd, capture=False): + def exec_cmd(self, cmd, capture=True): LOG.debug('Attempting to run: %s', cmd) try: (out, err) = subp.subp(cmd, capture=capture) if err: LOG.warning('Running %s resulted in stderr output: %s', cmd, err) - return True, out, err + return True, out, err + return False, out, err except subp.ProcessExecutionError: util.logexc(LOG, 'Command %s failed', cmd) - return False, None, None + return True, None, None def generate_fallback_config(self): key = 'disable_fallback_netcfg' @@ -85,41 +85,32 @@ class Distro(distros.Distro): # For locale change to take effect, reboot is needed or we can restart # systemd-localed. 
This is equivalent of localectl cmd = ['systemctl', 'restart', 'systemd-localed'] - _ret, _out, _err = self.exec_cmd(cmd) + self.exec_cmd(cmd) def install_packages(self, pkglist): # self.update_package_sources() self.package_command('install', pkgs=pkglist) - def _bring_up_interfaces(self, device_names): - cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'] - LOG.debug('Attempting to run bring up interfaces using command %s', - cmd) - ret, _out, _err = self.exec_cmd(cmd) - return ret - def _write_hostname(self, hostname, filename): - conf = None - try: - # Try to update the previous one - # Let's see if we can read it first. - conf = HostnameConf(util.load_file(filename)) - conf.parse() - except IOError: - pass - if not conf: - conf = HostnameConf('') - conf.set_hostname(hostname) - util.write_file(filename, str(conf), mode=0o644) + if filename and filename.endswith('/previous-hostname'): + util.write_file(filename, hostname) + else: + ret, _out, err = self.exec_cmd(['hostnamectl', 'set-hostname', + str(hostname)]) + if ret: + LOG.warning(('Error while setting hostname: %s\n' + 'Given hostname: %s', err, hostname)) def _read_system_hostname(self): - sys_hostname = self._read_hostname(self.hostname_conf_fn) - return (self.hostname_conf_fn, sys_hostname) + sys_hostname = self._read_hostname(self.systemd_hostname_conf_fn) + return (self.systemd_hostname_conf_fn, sys_hostname) def _read_hostname(self, filename, default=None): - _ret, out, _err = self.exec_cmd(['hostname']) + if filename and filename.endswith('/previous-hostname'): + return util.load_file(filename).strip() - return out if out else default + _ret, out, _err = self.exec_cmd(['hostname', '-f']) + return out.strip() if out else default def _get_localhost_ip(self): return '127.0.1.1' @@ -128,7 +119,7 @@ class Distro(distros.Distro): distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) def package_command(self, command, args=None, pkgs=None): - if pkgs is None: + if not pkgs: pkgs = [] cmd = ['tdnf', '-y'] @@ -142,8 +133,9 @@ class Distro(distros.Distro): pkglist = util.expand_package_list('%s-%s', pkgs) cmd.extend(pkglist) - # Allow the output of this to flow outwards (ie not be captured) - _ret, _out, _err = self.exec_cmd(cmd, capture=False) + ret, _out, err = self.exec_cmd(cmd) + if ret: + LOG.error('Error while installing packages: %s', err) def update_package_sources(self): self._runner.run('update-sources', self.package_command, diff --git a/cloudinit/net/activators.py b/cloudinit/net/activators.py index 84aaafc9..11149548 100644 --- a/cloudinit/net/activators.py +++ b/cloudinit/net/activators.py @@ -8,6 +8,7 @@ from cloudinit import subp from cloudinit import util from cloudinit.net.eni import available as eni_available from cloudinit.net.netplan import available as netplan_available +from cloudinit.net.networkd import available as networkd_available from cloudinit.net.network_state import NetworkState from cloudinit.net.sysconfig import NM_CFG_FILE @@ -213,12 +214,38 @@ class NetplanActivator(NetworkActivator): return _alter_interface(NetplanActivator.NETPLAN_CMD, 'all') +class NetworkdActivator(NetworkActivator): + @staticmethod + def available(target=None) -> bool: + """Return true if ifupdown can be used on this system.""" + return networkd_available(target=target) + + @staticmethod + def bring_up_interface(device_name: str) -> bool: + """ Return True is successful, otherwise return False """ + cmd = ['ip', 'link', 'set', 'up', device_name] + return _alter_interface(cmd, device_name) + + 
@staticmethod + def bring_up_all_interfaces(network_state: NetworkState) -> bool: + """ Return True is successful, otherwise return False """ + cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'] + return _alter_interface(cmd, 'all') + + @staticmethod + def bring_down_interface(device_name: str) -> bool: + """ Return True is successful, otherwise return False """ + cmd = ['ip', 'link', 'set', 'down', device_name] + return _alter_interface(cmd, device_name) + + # This section is mostly copied and pasted from renderers.py. An abstract # version to encompass both seems overkill at this point DEFAULT_PRIORITY = [ IfUpDownActivator, NetworkManagerActivator, NetplanActivator, + NetworkdActivator, ] diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py index 2dffce59..63e3a07f 100644 --- a/cloudinit/net/networkd.py +++ b/cloudinit/net/networkd.py @@ -246,7 +246,7 @@ class Renderer(renderer.Renderer): def available(target=None): - expected = ['systemctl'] + expected = ['ip', 'systemctl'] search = ['/usr/bin', '/bin'] for p in expected: if not subp.which(p, search=search, target=target): diff --git a/tests/unittests/test_distros/test_photon.py b/tests/unittests/test_distros/test_photon.py index 775f37ac..1c3145ca 100644 --- a/tests/unittests/test_distros/test_photon.py +++ b/tests/unittests/test_distros/test_photon.py @@ -25,11 +25,28 @@ class TestPhoton(CiTestCase): def test_get_distro(self): self.assertEqual(self.distro.osfamily, 'photon') - def test_write_hostname(self): + @mock.patch("cloudinit.distros.photon.subp.subp") + def test_write_hostname(self, m_subp): hostname = 'myhostname' - hostfile = self.tmp_path('hostfile') + hostfile = self.tmp_path('previous-hostname') self.distro._write_hostname(hostname, hostfile) - self.assertEqual(hostname + '\n', util.load_file(hostfile)) + self.assertEqual(hostname, util.load_file(hostfile)) + + ret = self.distro._read_hostname(hostfile) + self.assertEqual(ret, hostname) + + m_subp.return_value = (None, None) + hostfile += 'hostfile' + self.distro._write_hostname(hostname, hostfile) + + m_subp.return_value = (hostname, None) + ret = self.distro._read_hostname(hostfile) + self.assertEqual(ret, hostname) + + self.logs.truncate(0) + m_subp.return_value = (None, 'bla') + self.distro._write_hostname(hostname, None) + self.assertIn('Error while setting hostname', self.logs.getvalue()) @mock.patch('cloudinit.net.generate_fallback_config') def test_fallback_netcfg(self, m_fallback_cfg): diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index 32ca3b7e..1a524c7d 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -120,8 +120,8 @@ class TestHostname(t_help.FilesystemMockingTestCase): contents = util.load_file(distro.hostname_conf_fn) self.assertEqual('blah', contents.strip()) - @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) - def test_photon_hostname(self, m_uses_systemd): + @mock.patch('cloudinit.distros.photon.subp.subp') + def test_photon_hostname(self, m_subp): cfg1 = { 'hostname': 'photon', 'prefer_fqdn_over_hostname': True, @@ -134,17 +134,31 @@ class TestHostname(t_help.FilesystemMockingTestCase): } ds = None + m_subp.return_value = (None, None) distro = self._fetch_distro('photon', cfg1) paths = helpers.Paths({'cloud_dir': self.tmp}) cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) for c in [cfg1, cfg2]: 
            cc_set_hostname.handle('cc_set_hostname', c, cc, LOG, [])
-            contents = util.load_file(distro.hostname_conf_fn, decode=True)
             if c['prefer_fqdn_over_hostname']:
-                self.assertEqual(contents.strip(), c['fqdn'])
+                assert mock.call(['hostnamectl', 'set-hostname', c['fqdn']],
+                                 capture=True) in m_subp.call_args_list
+                assert mock.call(['hostnamectl', 'set-hostname',
+                                  c['hostname']],
+                                 capture=True) not in m_subp.call_args_list
             else:
-                self.assertEqual(contents.strip(), c['hostname'])
+                assert mock.call(['hostnamectl', 'set-hostname',
+                                  c['hostname']],
+                                 capture=True) in m_subp.call_args_list
+                assert mock.call(['hostnamectl', 'set-hostname', c['fqdn']],
+                                 capture=True) not in m_subp.call_args_list

     def test_multiple_calls_skips_unchanged_hostname(self):
         """Only new hostname or fqdn values will generate a hostname call."""
diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py
index db825c35..38f2edf2 100644
--- a/tests/unittests/test_net_activators.py
+++ b/tests/unittests/test_net_activators.py
@@ -11,7 +11,8 @@ from cloudinit.net.activators import (
 from cloudinit.net.activators import (
     IfUpDownActivator,
     NetplanActivator,
-    NetworkManagerActivator
+    NetworkManagerActivator,
+    NetworkdActivator
 )
 from cloudinit.net.network_state import parse_net_config_data
 from cloudinit.safeyaml import load
@@ -116,11 +117,17 @@ NETWORK_MANAGER_AVAILABLE_CALLS = [
     (('nmcli',), {'target': None}),
 ]

+NETWORKD_AVAILABLE_CALLS = [
+    (('ip',), {'search': ['/usr/bin', '/bin'], 'target': None}),
+    (('systemctl',), {'search': ['/usr/bin', '/bin'], 'target': None}),
+]
+

 @pytest.mark.parametrize('activator, available_calls', [
     (IfUpDownActivator, IF_UP_DOWN_AVAILABLE_CALLS),
     (NetplanActivator, NETPLAN_AVAILABLE_CALLS),
     (NetworkManagerActivator, NETWORK_MANAGER_AVAILABLE_CALLS),
+    (NetworkdActivator, NETWORKD_AVAILABLE_CALLS),
 ])
 class TestActivatorsAvailable:
     def test_available(
@@ -140,11 +147,18 @@ NETWORK_MANAGER_BRING_UP_CALL_LIST = [
     ((['nmcli', 'connection', 'up', 'ifname', 'eth1'], ), {}),
 ]

+NETWORKD_BRING_UP_CALL_LIST = [
+    ((['ip', 'link', 'set', 'up', 'eth0'], ), {}),
+    ((['ip', 'link', 'set', 'up', 'eth1'], ), {}),
+    ((['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'], ), {}),
+]
+

 @pytest.mark.parametrize('activator, expected_call_list', [
     (IfUpDownActivator, IF_UP_DOWN_BRING_UP_CALL_LIST),
     (NetplanActivator, NETPLAN_CALL_LIST),
     (NetworkManagerActivator, NETWORK_MANAGER_BRING_UP_CALL_LIST),
+    (NetworkdActivator, NETWORKD_BRING_UP_CALL_LIST),
 ])
 class TestActivatorsBringUp:
     @patch('cloudinit.subp.subp', return_value=('', ''))
@@ -159,8 +173,11 @@ class TestActivatorsBringUp:
     def test_bring_up_interfaces(
         self, m_subp, activator, expected_call_list, available_mocks
     ):
         activator.bring_up_interfaces(['eth0', 'eth1'])
-        assert expected_call_list == m_subp.call_args_list
+        for call, expected in zip(m_subp.call_args_list, expected_call_list):
+            assert call == expected

     @patch('cloudinit.subp.subp', return_value=('', ''))
     def test_bring_up_all_interfaces_v1(
@@ -191,11 +208,17 @@ NETWORK_MANAGER_BRING_DOWN_CALL_LIST = [
     ((['nmcli', 'connection', 'down', 'eth1'], ), {}),
 ]

+NETWORKD_BRING_DOWN_CALL_LIST = [
+    ((['ip', 'link', 'set', 'down', 'eth0'], ), {}),
+    ((['ip', 'link', 'set', 'down', 'eth1'], ), {}),
+]
+

 @pytest.mark.parametrize('activator, expected_call_list', [
     (IfUpDownActivator, IF_UP_DOWN_BRING_DOWN_CALL_LIST),
     (NetplanActivator, NETPLAN_CALL_LIST),
     (NetworkManagerActivator,
     NETWORK_MANAGER_BRING_DOWN_CALL_LIST),
+    (NetworkdActivator, NETWORKD_BRING_DOWN_CALL_LIST),
 ])
 class TestActivatorsBringDown:
     @patch('cloudinit.subp.subp', return_value=('', ''))
--
cgit v1.2.3


From 8b4a9bc7b81e61943af873bad92e2133f8275b0b Mon Sep 17 00:00:00 2001
From: Andrew Kutz <101085+akutz@users.noreply.github.com>
Date: Mon, 9 Aug 2021 21:24:07 -0500
Subject: Datasource for VMware (#953)

This patch finally introduces the Cloud-Init Datasource for VMware
GuestInfo as a part of cloud-init proper. This datasource has existed
since 2018 and rapidly became the de facto datasource for developers
working with Packer and Terraform, for projects like kube-image-builder,
and the de jure datasource for Photon OS.

The major change to the datasource from its previous incarnation is the
name. Now named DatasourceVMware, this new version of the datasource
will allow multiple transport types in addition to GuestInfo keys.

This datasource includes several unique features developed to address
real-world situations:

* Support for reading any key (metadata, userdata, vendordata) both
  from the guestinfo table when running on a VM in vSphere as well as
  from an environment variable when running inside of a container,
  useful for rapid dev/test.

* Allows booting with DHCP while still providing full participation in
  Cloud-Init instance data and Jinja queries. The netifaces library
  provides the ability to inspect the network after it is online, and
  the runtime network configuration is then merged into the existing
  metadata and persisted to disk.

* Advertises the local_ipv4 and local_ipv6 addresses via guestinfo as
  well. This is useful as Guest Tools is not always able to identify
  what would be considered the local address.

The primary author and current steward of this datasource spoke at
Cloud-Init Con 2020, where there was interest in contributing this
datasource to the Cloud-Init codebase.

The datasource currently lives in its own GitHub repository at
https://github.com/vmware/cloud-init-vmware-guestinfo. Once the
datasource is merged into Cloud-Init, the old repository will be
deprecated.
---
 README.md                                      |   2 +-
 cloudinit/settings.py                          |   1 +
 cloudinit/sources/DataSourceVMware.py          | 871 +++++++++++++++++++++++++
 doc/rtd/topics/availability.rst                |   1 +
 doc/rtd/topics/datasources.rst                 |   1 +
 doc/rtd/topics/datasources/vmware.rst          | 359 ++++++++++
 requirements.txt                               |   9 +
 tests/unittests/test_datasource/test_common.py |   3 +
 tests/unittests/test_datasource/test_vmware.py | 377 +++++++++++
 tests/unittests/test_ds_identify.py            | 279 +++++++-
 tools/.github-cla-signers                      |   1 +
 tools/ds-identify                              |  76 ++-
 12 files changed, 1977 insertions(+), 3 deletions(-)
 create mode 100644 cloudinit/sources/DataSourceVMware.py
 create mode 100644 doc/rtd/topics/datasources/vmware.rst
 create mode 100644 tests/unittests/test_datasource/test_vmware.py
(limited to 'cloudinit')

diff --git a/README.md b/README.md
index caf9a6e9..5828c2fa 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!

 | Supported OSes | Supported Public Clouds | Supported Private Clouds |
 | --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|

 ## To start developing cloud-init
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 23e4c0ad..f69005ea 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -43,6 +43,7 @@ CFG_BUILTIN = {
         'Exoscale',
         'RbxCloud',
         'UpCloud',
+        'VMware',
         # At the end to act as a 'catch' when none of the above work...
         'None',
     ],
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
new file mode 100644
index 00000000..22ca63de
--- /dev/null
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -0,0 +1,871 @@
+# Cloud-Init DataSource for VMware
+#
+# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Anish Swaminathan
+#          Andrew Kutz
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Cloud-Init DataSource for VMware
+
+This module provides a cloud-init datasource for VMware systems and supports
+multiple transport types, including:
+
+    * EnvVars
+    * GuestInfo
+
+Netifaces (https://github.com/al45tair/netifaces)
+
+    Please note this module relies on the netifaces project to introspect the
+    runtime network configuration of the host on which this datasource is
+    running. This is in contrast to the rest of cloud-init, which uses the
+    cloudinit/netinfo module.
+
+    The reasons for using netifaces include:
+
+    * Netifaces is built in C and is more portable across multiple systems
+      and more deterministic than shell exec'ing local network commands and
+      parsing their output.
+
+    * Netifaces provides a stable way to determine the view of the host's
+      network after DHCP has brought the network online. Unlike most other
+      datasources, this datasource still provides support for JINJA queries
+      based on networking information even when the network is based on a
+      DHCP lease. While this does not tie this datasource directly to
+      netifaces, it does mean the ability to consistently obtain the
+      correct information is paramount.
+
+    * It is currently possible to execute this datasource on macOS
+      (which many developers use today) to print the output of the
+      get_host_info function. This function calls netifaces to obtain
+      the same runtime network configuration that the datasource would
+      persist to the local system's instance data.
+
+      However, the netinfo module fails on macOS. The result is either a
+      hung operation that requires a SIGINT to return control to the user,
+      or, if brew is used to install iproute2mac, the ip commands are used
+      but produce output the netinfo module is unable to parse.
+
+      While macOS is not a target of cloud-init, this feature is quite
+      useful when working on this datasource.
+
+      For more information about this behavior, please see the following
+      PR comment, https://bit.ly/3fG7OVh.
+
+    The authors of this datasource are not opposed to moving away from
+    netifaces. The goal may be to eventually do just that. This proviso was
+    added to the top of this module as a way to remind future-us and others
+    why netifaces was used in the first place in order to either smooth the
+    transition away from netifaces or embrace it further up the cloud-init
+    stack.
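+
+    As a brief illustrative sketch (an aside; see get_default_ip_addrs
+    below for the real logic), the netifaces calls used by this module
+    look roughly like:
+
+        import netifaces
+        gateways = netifaces.gateways()         # default routes per family
+        _, dev = gateways["default"][netifaces.AF_INET]
+        addrs = netifaces.ifaddresses(dev)      # keyed by address family
+        ipv4 = addrs.get(netifaces.AF_INET)     # e.g. [{"addr": ...}]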
+""" + +import collections +import copy +from distutils.spawn import find_executable +import ipaddress +import json +import os +import socket +import time + +from cloudinit import dmi, log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit.subp import subp, ProcessExecutionError + +import netifaces + + +PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid" + +LOG = logging.getLogger(__name__) +NOVAL = "No value found" + +DATA_ACCESS_METHOD_ENVVAR = "envvar" +DATA_ACCESS_METHOD_GUESTINFO = "guestinfo" + +VMWARE_RPCTOOL = find_executable("vmware-rpctool") +REDACT = "redact" +CLEANUP_GUESTINFO = "cleanup-guestinfo" +VMX_GUESTINFO = "VMX_GUESTINFO" +GUESTINFO_EMPTY_YAML_VAL = "---" + +LOCAL_IPV4 = "local-ipv4" +LOCAL_IPV6 = "local-ipv6" +WAIT_ON_NETWORK = "wait-on-network" +WAIT_ON_NETWORK_IPV4 = "ipv4" +WAIT_ON_NETWORK_IPV6 = "ipv6" + + +class DataSourceVMware(sources.DataSource): + """ + Setting the hostname: + The hostname is set by way of the metadata key "local-hostname". + + Setting the instance ID: + The instance ID may be set by way of the metadata key "instance-id". + However, if this value is absent then the instance ID is read + from the file /sys/class/dmi/id/product_uuid. + + Configuring the network: + The network is configured by setting the metadata key "network" + with a value consistent with Network Config Versions 1 or 2, + depending on the Linux distro's version of cloud-init: + + Network Config Version 1 - http://bit.ly/cloudinit-net-conf-v1 + Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2 + + For example, CentOS 7's official cloud-init package is version + 0.7.9 and does not support Network Config Version 2. However, + this datasource still supports supplying Network Config Version 2 + data as long as the Linux distro's cloud-init package is new + enough to parse the data. + + The metadata key "network.encoding" may be used to indicate the + format of the metadata key "network". Valid encodings are base64 + and gzip+base64. + """ + + dsname = "VMware" + + def __init__(self, sys_cfg, distro, paths, ud_proc=None): + sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) + + self.data_access_method = None + self.vmware_rpctool = VMWARE_RPCTOOL + + def _get_data(self): + """ + _get_data loads the metadata, userdata, and vendordata from one of + the following locations in the given order: + + * envvars + * guestinfo + + Please note when updating this function with support for new data + transports, the order should match the order in the dscheck_VMware + function from the file ds-identify. + """ + + # Initialize the locally scoped metadata, userdata, and vendordata + # variables. They are assigned below depending on the detected data + # access method. + md, ud, vd = None, None, None + + # First check to see if there is data via env vars. + if os.environ.get(VMX_GUESTINFO, ""): + md = guestinfo_envvar("metadata") + ud = guestinfo_envvar("userdata") + vd = guestinfo_envvar("vendordata") + + if md or ud or vd: + self.data_access_method = DATA_ACCESS_METHOD_ENVVAR + + # At this point, all additional data transports are valid only on + # a VMware platform. + if not self.data_access_method: + system_type = dmi.read_dmi_data("system-product-name") + if system_type is None: + LOG.debug("No system-product-name found") + return False + if "vmware" not in system_type.lower(): + LOG.debug("Not a VMware platform") + return False + + # If no data was detected, check the guestinfo transport next. 
+ if not self.data_access_method: + if self.vmware_rpctool: + md = guestinfo("metadata", self.vmware_rpctool) + ud = guestinfo("userdata", self.vmware_rpctool) + vd = guestinfo("vendordata", self.vmware_rpctool) + + if md or ud or vd: + self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO + + if not self.data_access_method: + LOG.error("failed to find a valid data access method") + return False + + LOG.info("using data access method %s", self._get_subplatform()) + + # Get the metadata. + self.metadata = process_metadata(load_json_or_yaml(md)) + + # Get the user data. + self.userdata_raw = ud + + # Get the vendor data. + self.vendordata_raw = vd + + # Redact any sensitive information. + self.redact_keys() + + # get_data returns true if there is any available metadata, + # userdata, or vendordata. + if self.metadata or self.userdata_raw or self.vendordata_raw: + return True + else: + return False + + def setup(self, is_new_instance): + """setup(is_new_instance) + + This is called before user-data and vendor-data have been processed. + + Unless the datasource has set mode to 'local', then networking + per 'fallback' or per 'network_config' will have been written and + brought up the OS at this point. + """ + + host_info = wait_on_network(self.metadata) + LOG.info("got host-info: %s", host_info) + + # Reflect any possible local IPv4 or IPv6 addresses in the guest + # info. + advertise_local_ip_addrs(host_info) + + # Ensure the metadata gets updated with information about the + # host, including the network interfaces, default IP addresses, + # etc. + self.metadata = util.mergemanydict([self.metadata, host_info]) + + # Persist the instance data for versions of cloud-init that support + # doing so. This occurs here rather than in the get_data call in + # order to ensure that the network interfaces are up and can be + # persisted with the metadata. + self.persist_instance_data() + + def _get_subplatform(self): + get_key_name_fn = None + if self.data_access_method == DATA_ACCESS_METHOD_ENVVAR: + get_key_name_fn = get_guestinfo_envvar_key_name + elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: + get_key_name_fn = get_guestinfo_key_name + else: + return sources.METADATA_UNKNOWN + + return "%s (%s)" % ( + self.data_access_method, + get_key_name_fn("metadata"), + ) + + @property + def network_config(self): + if "network" in self.metadata: + LOG.debug("using metadata network config") + else: + LOG.debug("using fallback network config") + self.metadata["network"] = { + "config": self.distro.generate_fallback_config(), + } + return self.metadata["network"]["config"] + + def get_instance_id(self): + # Pull the instance ID out of the metadata if present. Otherwise + # read the file /sys/class/dmi/id/product_uuid for the instance ID. + if self.metadata and "instance-id" in self.metadata: + return self.metadata["instance-id"] + with open(PRODUCT_UUID_FILE_PATH, "r") as id_file: + self.metadata["instance-id"] = str(id_file.read()).rstrip().lower() + return self.metadata["instance-id"] + + def get_public_ssh_keys(self): + for key_name in ( + "public-keys-data", + "public_keys_data", + "public-keys", + "public_keys", + ): + if key_name in self.metadata: + return sources.normalize_pubkey_data(self.metadata[key_name]) + return [] + + def redact_keys(self): + # Determine if there are any keys to redact. + keys_to_redact = None + if REDACT in self.metadata: + keys_to_redact = self.metadata[REDACT] + elif CLEANUP_GUESTINFO in self.metadata: + # This is for backwards compatibility. 
+ keys_to_redact = self.metadata[CLEANUP_GUESTINFO] + + if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: + guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool) + + +def decode(key, enc_type, data): + """ + decode returns the decoded string value of data + key is a string used to identify the data being decoded in log messages + """ + LOG.debug("Getting encoded data for key=%s, enc=%s", key, enc_type) + + raw_data = None + if enc_type in ["gzip+base64", "gz+b64"]: + LOG.debug("Decoding %s format %s", enc_type, key) + raw_data = util.decomp_gzip(util.b64d(data)) + elif enc_type in ["base64", "b64"]: + LOG.debug("Decoding %s format %s", enc_type, key) + raw_data = util.b64d(data) + else: + LOG.debug("Plain-text data %s", key) + raw_data = data + + return util.decode_binary(raw_data) + + +def get_none_if_empty_val(val): + """ + get_none_if_empty_val returns None if the provided value, once stripped + of its trailing whitespace, is empty or equal to GUESTINFO_EMPTY_YAML_VAL. + + The return value is always a string, regardless of whether the input is + a bytes class or a string. + """ + + # If the provided value is a bytes class, convert it to a string to + # simplify the rest of this function's logic. + val = util.decode_binary(val) + val = val.rstrip() + if len(val) == 0 or val == GUESTINFO_EMPTY_YAML_VAL: + return None + return val + + +def advertise_local_ip_addrs(host_info): + """ + advertise_local_ip_addrs gets the local IP address information from + the provided host_info map and sets the addresses in the guestinfo + namespace + """ + if not host_info: + return + + # Reflect any possible local IPv4 or IPv6 addresses in the guest + # info. + local_ipv4 = host_info.get(LOCAL_IPV4) + if local_ipv4: + guestinfo_set_value(LOCAL_IPV4, local_ipv4) + LOG.info("advertised local ipv4 address %s in guestinfo", local_ipv4) + + local_ipv6 = host_info.get(LOCAL_IPV6) + if local_ipv6: + guestinfo_set_value(LOCAL_IPV6, local_ipv6) + LOG.info("advertised local ipv6 address %s in guestinfo", local_ipv6) + + +def handle_returned_guestinfo_val(key, val): + """ + handle_returned_guestinfo_val returns the provided value if it is + not empty or set to GUESTINFO_EMPTY_YAML_VAL, otherwise None is + returned + """ + val = get_none_if_empty_val(val) + if val: + return val + LOG.debug("No value found for key %s", key) + return None + + +def get_guestinfo_key_name(key): + return "guestinfo." + key + + +def get_guestinfo_envvar_key_name(key): + return ("vmx." + get_guestinfo_key_name(key)).upper().replace(".", "_", -1) + + +def guestinfo_envvar(key): + val = guestinfo_envvar_get_value(key) + if not val: + return None + enc_type = guestinfo_envvar_get_value(key + ".encoding") + return decode(get_guestinfo_envvar_key_name(key), enc_type, val) + + +def guestinfo_envvar_get_value(key): + env_key = get_guestinfo_envvar_key_name(key) + return handle_returned_guestinfo_val(key, os.environ.get(env_key, "")) + + +def guestinfo(key, vmware_rpctool=VMWARE_RPCTOOL): + """ + guestinfo returns the guestinfo value for the provided key, decoding + the value when required + """ + val = guestinfo_get_value(key, vmware_rpctool) + if not val: + return None + enc_type = guestinfo_get_value(key + ".encoding", vmware_rpctool) + return decode(get_guestinfo_key_name(key), enc_type, val) + + +def guestinfo_get_value(key, vmware_rpctool=VMWARE_RPCTOOL): + """ + Returns a guestinfo value for the specified key. 
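+
+    Illustratively, this is roughly equivalent to running (assuming
+    vmware-rpctool is on the PATH):
+
+        vmware-rpctool "info-get guestinfo.metadata"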
+ """ + LOG.debug("Getting guestinfo value for key %s", key) + + try: + (stdout, stderr) = subp( + [ + vmware_rpctool, + "info-get " + get_guestinfo_key_name(key), + ] + ) + if stderr == NOVAL: + LOG.debug("No value found for key %s", key) + elif not stdout: + LOG.error("Failed to get guestinfo value for key %s", key) + return handle_returned_guestinfo_val(key, stdout) + except ProcessExecutionError as error: + if error.stderr == NOVAL: + LOG.debug("No value found for key %s", key) + else: + util.logexc( + LOG, + "Failed to get guestinfo value for key %s: %s", + key, + error, + ) + except Exception: + util.logexc( + LOG, + "Unexpected error while trying to get " + + "guestinfo value for key %s", + key, + ) + + return None + + +def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL): + """ + Sets a guestinfo value for the specified key. Set value to an empty string + to clear an existing guestinfo key. + """ + + # If value is an empty string then set it to a single space as it is not + # possible to set a guestinfo key to an empty string. Setting a guestinfo + # key to a single space is as close as it gets to clearing an existing + # guestinfo key. + if value == "": + value = " " + + LOG.debug("Setting guestinfo key=%s to value=%s", key, value) + + try: + subp( + [ + vmware_rpctool, + ("info-set %s %s" % (get_guestinfo_key_name(key), value)), + ] + ) + return True + except ProcessExecutionError as error: + util.logexc( + LOG, + "Failed to set guestinfo key=%s to value=%s: %s", + key, + value, + error, + ) + except Exception: + util.logexc( + LOG, + "Unexpected error while trying to set " + + "guestinfo key=%s to value=%s", + key, + value, + ) + + return None + + +def guestinfo_redact_keys(keys, vmware_rpctool=VMWARE_RPCTOOL): + """ + guestinfo_redact_keys redacts guestinfo of all of the keys in the given + list. each key will have its value set to "---". Since the value is valid + YAML, cloud-init can still read it if it tries. + """ + if not keys: + return + if not type(keys) in (list, tuple): + keys = [keys] + for key in keys: + key_name = get_guestinfo_key_name(key) + LOG.info("clearing %s", key_name) + if not guestinfo_set_value( + key, GUESTINFO_EMPTY_YAML_VAL, vmware_rpctool + ): + LOG.error("failed to clear %s", key_name) + LOG.info("clearing %s.encoding", key_name) + if not guestinfo_set_value(key + ".encoding", "", vmware_rpctool): + LOG.error("failed to clear %s.encoding", key_name) + + +def load_json_or_yaml(data): + """ + load first attempts to unmarshal the provided data as JSON, and if + that fails then attempts to unmarshal the data as YAML. If data is + None then a new dictionary is returned. + """ + if not data: + return {} + try: + return util.load_json(data) + except (json.JSONDecodeError, TypeError): + return util.load_yaml(data) + + +def process_metadata(data): + """ + process_metadata processes metadata and loads the optional network + configuration. 
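+
+    For example (an illustrative input, not taken from a live system),
+    metadata of the form
+
+        {"network": "<base64 YAML>", "network.encoding": "base64"}
+
+    is rewritten so that data["network"] == {"config": <decoded mapping>}.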
+ """ + network = None + if "network" in data: + network = data["network"] + del data["network"] + + network_enc = None + if "network.encoding" in data: + network_enc = data["network.encoding"] + del data["network.encoding"] + + if network: + if isinstance(network, collections.abc.Mapping): + LOG.debug("network data copied to 'config' key") + network = {"config": copy.deepcopy(network)} + else: + LOG.debug("network data to be decoded %s", network) + dec_net = decode("metadata.network", network_enc, network) + network = { + "config": load_json_or_yaml(dec_net), + } + + LOG.debug("network data %s", network) + data["network"] = network + + return data + + +# Used to match classes to dependencies +datasources = [ + (DataSourceVMware, (sources.DEP_FILESYSTEM,)), # Run at init-local + (DataSourceVMware, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +def get_datasource_list(depends): + """ + Return a list of data sources that match this set of dependencies + """ + return sources.list_from_depends(depends, datasources) + + +def get_default_ip_addrs(): + """ + Returns the default IPv4 and IPv6 addresses based on the device(s) used for + the default route. Please note that None may be returned for either address + family if that family has no default route or if there are multiple + addresses associated with the device used by the default route for a given + address. + """ + # TODO(promote and use netifaces in cloudinit.net* modules) + gateways = netifaces.gateways() + if "default" not in gateways: + return None, None + + default_gw = gateways["default"] + if ( + netifaces.AF_INET not in default_gw + and netifaces.AF_INET6 not in default_gw + ): + return None, None + + ipv4 = None + ipv6 = None + + gw4 = default_gw.get(netifaces.AF_INET) + if gw4: + _, dev4 = gw4 + addr4_fams = netifaces.ifaddresses(dev4) + if addr4_fams: + af_inet4 = addr4_fams.get(netifaces.AF_INET) + if af_inet4: + if len(af_inet4) > 1: + LOG.warning( + "device %s has more than one ipv4 address: %s", + dev4, + af_inet4, + ) + elif "addr" in af_inet4[0]: + ipv4 = af_inet4[0]["addr"] + + # Try to get the default IPv6 address by first seeing if there is a default + # IPv6 route. + gw6 = default_gw.get(netifaces.AF_INET6) + if gw6: + _, dev6 = gw6 + addr6_fams = netifaces.ifaddresses(dev6) + if addr6_fams: + af_inet6 = addr6_fams.get(netifaces.AF_INET6) + if af_inet6: + if len(af_inet6) > 1: + LOG.warning( + "device %s has more than one ipv6 address: %s", + dev6, + af_inet6, + ) + elif "addr" in af_inet6[0]: + ipv6 = af_inet6[0]["addr"] + + # If there is a default IPv4 address but not IPv6, then see if there is a + # single IPv6 address associated with the same device associated with the + # default IPv4 address. + if ipv4 and not ipv6: + af_inet6 = addr4_fams.get(netifaces.AF_INET6) + if af_inet6: + if len(af_inet6) > 1: + LOG.warning( + "device %s has more than one ipv6 address: %s", + dev4, + af_inet6, + ) + elif "addr" in af_inet6[0]: + ipv6 = af_inet6[0]["addr"] + + # If there is a default IPv6 address but not IPv4, then see if there is a + # single IPv4 address associated with the same device associated with the + # default IPv6 address. 
+ if not ipv4 and ipv6: + af_inet4 = addr6_fams.get(netifaces.AF_INET) + if af_inet4: + if len(af_inet4) > 1: + LOG.warning( + "device %s has more than one ipv4 address: %s", + dev6, + af_inet4, + ) + elif "addr" in af_inet4[0]: + ipv4 = af_inet4[0]["addr"] + + return ipv4, ipv6 + + +# patched socket.getfqdn() - see https://bugs.python.org/issue5004 + + +def getfqdn(name=""): + """Get fully qualified domain name from name. + An empty argument is interpreted as meaning the local host. + """ + # TODO(may want to promote this function to util.getfqdn) + # TODO(may want to extend util.get_hostname to accept fqdn=True param) + name = name.strip() + if not name or name == "0.0.0.0": + name = util.get_hostname() + try: + addrs = socket.getaddrinfo( + name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME + ) + except socket.error: + pass + else: + for addr in addrs: + if addr[3]: + name = addr[3] + break + return name + + +def is_valid_ip_addr(val): + """ + Returns false if the address is loopback, link local or unspecified; + otherwise true is returned. + """ + # TODO(extend cloudinit.net.is_ip_addr exclude link_local/loopback etc) + # TODO(migrate to use cloudinit.net.is_ip_addr)# + + addr = None + try: + addr = ipaddress.ip_address(val) + except ipaddress.AddressValueError: + addr = ipaddress.ip_address(str(val)) + except Exception: + return None + + if addr.is_link_local or addr.is_loopback or addr.is_unspecified: + return False + return True + + +def get_host_info(): + """ + Returns host information such as the host name and network interfaces. + """ + # TODO(look to promote netifices use up in cloud-init netinfo funcs) + host_info = { + "network": { + "interfaces": { + "by-mac": collections.OrderedDict(), + "by-ipv4": collections.OrderedDict(), + "by-ipv6": collections.OrderedDict(), + }, + }, + } + hostname = getfqdn(util.get_hostname()) + if hostname: + host_info["hostname"] = hostname + host_info["local-hostname"] = hostname + host_info["local_hostname"] = hostname + + default_ipv4, default_ipv6 = get_default_ip_addrs() + if default_ipv4: + host_info[LOCAL_IPV4] = default_ipv4 + if default_ipv6: + host_info[LOCAL_IPV6] = default_ipv6 + + by_mac = host_info["network"]["interfaces"]["by-mac"] + by_ipv4 = host_info["network"]["interfaces"]["by-ipv4"] + by_ipv6 = host_info["network"]["interfaces"]["by-ipv6"] + + ifaces = netifaces.interfaces() + for dev_name in ifaces: + addr_fams = netifaces.ifaddresses(dev_name) + af_link = addr_fams.get(netifaces.AF_LINK) + af_inet4 = addr_fams.get(netifaces.AF_INET) + af_inet6 = addr_fams.get(netifaces.AF_INET6) + + mac = None + if af_link and "addr" in af_link[0]: + mac = af_link[0]["addr"] + + # Do not bother recording localhost + if mac == "00:00:00:00:00:00": + continue + + if mac and (af_inet4 or af_inet6): + key = mac + val = {} + if af_inet4: + af_inet4_vals = [] + for ip_info in af_inet4: + if not is_valid_ip_addr(ip_info["addr"]): + continue + af_inet4_vals.append(ip_info) + val["ipv4"] = af_inet4_vals + if af_inet6: + af_inet6_vals = [] + for ip_info in af_inet6: + if not is_valid_ip_addr(ip_info["addr"]): + continue + af_inet6_vals.append(ip_info) + val["ipv6"] = af_inet6_vals + by_mac[key] = val + + if af_inet4: + for ip_info in af_inet4: + key = ip_info["addr"] + if not is_valid_ip_addr(key): + continue + val = copy.deepcopy(ip_info) + del val["addr"] + if mac: + val["mac"] = mac + by_ipv4[key] = val + + if af_inet6: + for ip_info in af_inet6: + key = ip_info["addr"] + if not is_valid_ip_addr(key): + continue + val = copy.deepcopy(ip_info) 
+ del val["addr"] + if mac: + val["mac"] = mac + by_ipv6[key] = val + + return host_info + + +def wait_on_network(metadata): + # Determine whether we need to wait on the network coming online. + wait_on_ipv4 = False + wait_on_ipv6 = False + if WAIT_ON_NETWORK in metadata: + wait_on_network = metadata[WAIT_ON_NETWORK] + if WAIT_ON_NETWORK_IPV4 in wait_on_network: + wait_on_ipv4_val = wait_on_network[WAIT_ON_NETWORK_IPV4] + if isinstance(wait_on_ipv4_val, bool): + wait_on_ipv4 = wait_on_ipv4_val + else: + wait_on_ipv4 = util.translate_bool(wait_on_ipv4_val) + if WAIT_ON_NETWORK_IPV6 in wait_on_network: + wait_on_ipv6_val = wait_on_network[WAIT_ON_NETWORK_IPV6] + if isinstance(wait_on_ipv6_val, bool): + wait_on_ipv6 = wait_on_ipv6_val + else: + wait_on_ipv6 = util.translate_bool(wait_on_ipv6_val) + + # Get information about the host. + host_info = None + while host_info is None: + # This loop + sleep results in two logs every second while waiting + # for either ipv4 or ipv6 up. Do we really need to log each iteration + # or can we log once and log on successful exit? + host_info = get_host_info() + + network = host_info.get("network") or {} + interfaces = network.get("interfaces") or {} + by_ipv4 = interfaces.get("by-ipv4") or {} + by_ipv6 = interfaces.get("by-ipv6") or {} + + if wait_on_ipv4: + ipv4_ready = len(by_ipv4) > 0 if by_ipv4 else False + if not ipv4_ready: + host_info = None + + if wait_on_ipv6: + ipv6_ready = len(by_ipv6) > 0 if by_ipv6 else False + if not ipv6_ready: + host_info = None + + if host_info is None: + LOG.debug( + "waiting on network: wait4=%s, ready4=%s, wait6=%s, ready6=%s", + wait_on_ipv4, + ipv4_ready, + wait_on_ipv6, + ipv6_ready, + ) + time.sleep(1) + + LOG.debug("waiting on network complete") + return host_info + + +def main(): + """ + Executed when this file is used as a program. + """ + try: + logging.setupBasicLogging() + except Exception: + pass + metadata = { + "wait-on-network": {"ipv4": True, "ipv6": "false"}, + "network": {"config": {"dhcp": True}}, + } + host_info = wait_on_network(metadata) + metadata = util.mergemanydict([metadata, host_info]) + print(util.json_dumps(metadata)) + + +if __name__ == "__main__": + main() + +# vi: ts=4 expandtab diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index e0644534..71827177 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -67,5 +67,6 @@ Additionally, cloud-init is supported on these private clouds: - LXD - KVM - Metal-as-a-Service (MAAS) +- VMware .. vi: textwidth=79 diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 497b1467..f5aee1c2 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -50,6 +50,7 @@ The following is a list of documents for each supported datasource: datasources/upcloud.rst datasources/zstack.rst datasources/vultr.rst + datasources/vmware.rst Creation ======== diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst new file mode 100644 index 00000000..996eb61f --- /dev/null +++ b/doc/rtd/topics/datasources/vmware.rst @@ -0,0 +1,359 @@ +.. 
_datasource_vmware: + +VMware +====== + +This datasource is for use with systems running on a VMware platform such as +vSphere and currently supports the following data transports: + + +* `GuestInfo `_ keys + +Configuration +------------- + +The configuration method is dependent upon the transport: + +GuestInfo Keys +^^^^^^^^^^^^^^ + +One method of providing meta, user, and vendor data is by setting the following +key/value pairs on a VM's ``extraConfig`` `property `_ : + +.. list-table:: + :header-rows: 1 + + * - Property + - Description + * - ``guestinfo.metadata`` + - A YAML or JSON document containing the cloud-init metadata. + * - ``guestinfo.metadata.encoding`` + - The encoding type for ``guestinfo.metadata``. + * - ``guestinfo.userdata`` + - A YAML document containing the cloud-init user data. + * - ``guestinfo.userdata.encoding`` + - The encoding type for ``guestinfo.userdata``. + * - ``guestinfo.vendordata`` + - A YAML document containing the cloud-init vendor data. + * - ``guestinfo.vendordata.encoding`` + - The encoding type for ``guestinfo.vendordata``. + + +All ``guestinfo.*.encoding`` values may be set to ``base64`` or +``gzip+base64``. + +Features +-------- + +This section reviews several features available in this datasource, regardless +of how the meta, user, and vendor data was discovered. + +Instance data and lazy networks +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +One of the hallmarks of cloud-init is `its use of instance-data and JINJA +queries <../instancedata.html#using-instance-data>`_ +-- the ability to write queries in user and vendor data that reference runtime +information present in ``/run/cloud-init/instance-data.json``. This works well +when the metadata provides all of the information up front, such as the network +configuration. For systems that rely on DHCP, however, this information may not +be available when the metadata is persisted to disk. + +This datasource ensures that even if the instance is using DHCP to configure +networking, the same details about the configured network are available in +``/run/cloud-init/instance-data.json`` as if static networking was used. This +information collected at runtime is easy to demonstrate by executing the +datasource on the command line. From the root of this repository, run the +following command: + +.. code-block:: bash + + PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py + +The above command will result in output similar to the below JSON: + +.. 
code-block:: json

+   {
+       "hostname": "akutz.localhost",
+       "local-hostname": "akutz.localhost",
+       "local-ipv4": "192.168.0.188",
+       "local_hostname": "akutz.localhost",
+       "network": {
+           "config": {
+               "dhcp": true
+           },
+           "interfaces": {
+               "by-ipv4": {
+                   "172.0.0.2": {
+                       "netmask": "255.255.255.255",
+                       "peer": "172.0.0.2"
+                   },
+                   "192.168.0.188": {
+                       "broadcast": "192.168.0.255",
+                       "mac": "64:4b:f0:18:9a:21",
+                       "netmask": "255.255.255.0"
+                   }
+               },
+               "by-ipv6": {
+                   "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": {
+                       "flags": 208,
+                       "mac": "64:4b:f0:18:9a:21",
+                       "netmask": "ffff:ffff:ffff:ffff::/64"
+                   }
+               },
+               "by-mac": {
+                   "64:4b:f0:18:9a:21": {
+                       "ipv4": [
+                           {
+                               "addr": "192.168.0.188",
+                               "broadcast": "192.168.0.255",
+                               "netmask": "255.255.255.0"
+                           }
+                       ],
+                       "ipv6": [
+                           {
+                               "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2",
+                               "flags": 208,
+                               "netmask": "ffff:ffff:ffff:ffff::/64"
+                           }
+                       ]
+                   },
+                   "ac:de:48:00:11:22": {
+                       "ipv6": []
+                   }
+               }
+           }
+       },
+       "wait-on-network": {
+           "ipv4": true,
+           "ipv6": "false"
+       }
+   }
+
+
+Redacting sensitive information
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes the cloud-init userdata might contain sensitive information, and it
+may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo
+keys) redacted as soon as its data is read by the datasource. This is possible
+by adding the following to the metadata:
+
+.. code-block:: yaml
+
+    redact: # formerly named cleanup-guestinfo, which will also work
+    - userdata
+    - vendordata
+
+When the above snippet is added to the metadata, the datasource will iterate
+over the elements in the ``redact`` array and clear each of the keys. For
+example, when the guestinfo transport is used, the above snippet will cause
+the following commands to be executed:
+
+.. code-block:: shell
+
+    vmware-rpctool "info-set guestinfo.userdata ---"
+    vmware-rpctool "info-set guestinfo.userdata.encoding "
+    vmware-rpctool "info-set guestinfo.vendordata ---"
+    vmware-rpctool "info-set guestinfo.vendordata.encoding "
+
+Please note that keys are set to the valid YAML string ``---`` as it is not
+possible to remove an existing key from the guestinfo key-space. A key's
+analogous encoding property will be set to a single white-space character,
+causing the datasource to treat the actual key value as plain-text, thereby
+loading it as an empty YAML doc (hence the aforementioned ``---``\ ).
+
+Reading the local IP addresses
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This datasource automatically discovers the local IPv4 and IPv6 addresses for
+a guest operating system based on the default routes. However, when inspecting
+a VM externally, it's not possible to know what the *default* IP address is for
+the guest OS. That's why this datasource sets the discovered local IPv4 and
+IPv6 addresses back in the guestinfo namespace as the following keys:
+
+* ``guestinfo.local-ipv4``
+* ``guestinfo.local-ipv6``
+
+It is possible that a host may not have any default local IP addresses. It's
+also possible the reported local addresses are link-local addresses. But these
+two keys may be used to discover what this datasource determined were the local
+IPv4 and IPv6 addresses for a host.
+
+Waiting on the network
+^^^^^^^^^^^^^^^^^^^^^^
+
+Sometimes cloud-init may bring up the network, but it will not finish coming
+online before the datasource's ``setup`` function is called, resulting in a
+``/var/run/cloud-init/instance-data.json`` file that does not have the correct
+network information.
It is possible to instruct the datasource to wait until an
+IPv4 or IPv6 address is available before writing the instance data with the
+following metadata properties:
+
+.. code-block:: yaml
+
+    wait-on-network:
+      ipv4: true
+      ipv6: true
+
+If either of the above values is true, then the datasource will sleep for a
+second, check the network status, and repeat until one or both addresses from
+the specified families are available.
+
+Walkthrough
+-----------
+
+The following series of steps demonstrates how to configure a VM with this
+datasource:
+
+
+#. Create the metadata file for the VM. Save the following YAML to a file named
+   ``metadata.yaml``\ :
+
+   .. code-block:: yaml
+
+       instance-id: cloud-vm
+       local-hostname: cloud-vm
+       network:
+         version: 2
+         ethernets:
+           nics:
+             match:
+               name: ens*
+             dhcp4: yes
+
+#. Create the userdata file ``userdata.yaml``\ :
+
+   .. code-block:: yaml
+
+       #cloud-config
+
+       users:
+       - default
+       - name: akutz
+         primary_group: akutz
+         sudo: ALL=(ALL) NOPASSWD:ALL
+         groups: sudo, wheel
+         ssh_import_id: None
+         lock_passwd: true
+         ssh_authorized_keys:
+         - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com
+
+#. Please note this step requires that the VM be powered off. All of the
+   commands below use the VMware CLI tool, `govc `_.
+
+   Go ahead and assign the path to the VM to the environment variable ``VM``\ :
+
+   .. code-block:: shell
+
+       export VM="/inventory/path/to/the/vm"
+
+#. Power off the VM:
+
+   .. note::
+
+      ⚠️ **First Boot Mode**
+
+      To ensure the next power-on operation results in a first-boot scenario
+      for cloud-init, it may be necessary to run the following command just
+      before powering off the VM:
+
+      .. code-block:: bash
+
+         cloud-init clean
+
+      Otherwise cloud-init may not run in first-boot mode. For more
+      information on how the boot mode is determined, please see the
+      `First Boot Documentation <../boot.html#first-boot-determination>`_.
+
+   .. code-block:: shell
+
+       govc vm.power -off "${VM}"
+
+#.
+   Export the environment variables that contain the cloud-init metadata and
+   userdata:
+
+   .. code-block:: shell
+
+       export METADATA=$(gzip -c9 <metadata.yaml | { base64 -w0 2>/dev/null || base64; }) \
+              USERDATA=$(gzip -c9 <userdata.yaml | { base64 -w0 2>/dev/null || base64; })
+
+#.
+   Assign the metadata and userdata to the VM:
+
+   .. code-block:: shell
+
+       govc vm.change -vm "${VM}" \
+         -e guestinfo.metadata="${METADATA}" \
+         -e guestinfo.metadata.encoding="gzip+base64" \
+         -e guestinfo.userdata="${USERDATA}" \
+         -e guestinfo.userdata.encoding="gzip+base64"
+
+   Please note the above commands include specifying the encoding for the
+   properties. This is important as it informs the datasource how to decode
+   the data for cloud-init. Valid values for ``metadata.encoding`` and
+   ``userdata.encoding`` include:
+
+   * ``base64``
+   * ``gzip+base64``
+
+#.
+   Power on the VM:
+
+   .. code-block:: shell
+
+       govc vm.power -on "${VM}"
+
+If all went according to plan, the CentOS box is:
+
+* Locked down, allowing SSH access only for the user in the userdata
+* Configured for a dynamic IP address via DHCP
+* Has a hostname of ``cloud-vm``
+
+Examples
+--------
+
+This section reviews common configurations:
+
+Setting the hostname
+^^^^^^^^^^^^^^^^^^^^
+
+The hostname is set by way of the metadata key ``local-hostname``.
+
+Setting the instance ID
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The instance ID may be set by way of the metadata key ``instance-id``. However,
+if this value is absent then the instance ID is read from the file
+``/sys/class/dmi/id/product_uuid``.
+
+Providing public SSH keys
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The public SSH keys may be set by way of the metadata key ``public-keys-data``.
+Each newline-terminated string will be interpreted as a separate SSH public
+key, which will be placed in the distro's default user's
+``~/.ssh/authorized_keys``. If the value is empty or absent, then nothing will
+be written to ``~/.ssh/authorized_keys``.
+
+Configuring the network
+^^^^^^^^^^^^^^^^^^^^^^^
+
+The network is configured by setting the metadata key ``network`` with a value
+consistent with Network Config Versions
+`1 <../network-config-format-v1.html>`_ or
+`2 <../network-config-format-v2.html>`_\ , depending on the Linux
+distro's version of cloud-init.
+
+The metadata key ``network.encoding`` may be used to indicate the format of
+the metadata key "network". Valid encodings are ``base64`` and ``gzip+base64``.
diff --git a/requirements.txt b/requirements.txt
index 5817da3b..41d01d62 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -32,3 +32,12 @@ jsonpatch

 # For validating cloud-config sections per schema definitions
 jsonschema
+
+# Used by DataSourceVMware to inspect the host's network configuration during
+# the "setup()" function.
+#
+# This allows a host that uses DHCP to bring up the network during BootLocal
+# and still participate in instance-data by gathering the network in detail
+# at runtime, merging that information into the metadata, and re-persisting
+# it to disk.
+netifaces>=0.10.9 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 5e9c547a..00f0a78c 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -29,6 +29,7 @@ from cloudinit.sources import ( DataSourceSmartOS as SmartOS, DataSourceUpCloud as UpCloud, DataSourceVultr as Vultr, + DataSourceVMware as VMware, ) from cloudinit.sources import DataSourceNone as DSNone @@ -52,6 +53,7 @@ DEFAULT_LOCAL = [ RbxCloud.DataSourceRbxCloud, Scaleway.DataSourceScaleway, UpCloud.DataSourceUpCloudLocal, + VMware.DataSourceVMware, ] DEFAULT_NETWORK = [ @@ -68,6 +70,7 @@ DEFAULT_NETWORK = [ OpenStack.DataSourceOpenStack, OVF.DataSourceOVFNet, UpCloud.DataSourceUpCloud, + VMware.DataSourceVMware, ] diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/test_datasource/test_vmware.py new file mode 100644 index 00000000..597db7c8 --- /dev/null +++ b/tests/unittests/test_datasource/test_vmware.py @@ -0,0 +1,377 @@ +# Copyright (c) 2021 VMware, Inc. All Rights Reserved. +# +# Authors: Andrew Kutz +# +# This file is part of cloud-init. See LICENSE file for license information. + +import base64 +import gzip +from cloudinit import dmi, helpers, safeyaml +from cloudinit import settings +from cloudinit.sources import DataSourceVMware +from cloudinit.tests.helpers import ( + mock, + CiTestCase, + FilesystemMockingTestCase, + populate_dir, +) + +import os + +PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" +PRODUCT_NAME = "VMware7,1" +PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" +REROOT_FILES = { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, +} + +VMW_MULTIPLE_KEYS = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com", + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com", +] +VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com" + +VMW_METADATA_YAML = """instance-id: cloud-vm +local-hostname: cloud-vm +network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes +""" + +VMW_USERDATA_YAML = """## template: jinja +#cloud-config +users: +- default +""" + +VMW_VENDORDATA_YAML = """## template: jinja +#cloud-config +runcmd: +- echo "Hello, world." +""" + + +class TestDataSourceVMware(CiTestCase): + """ + Test common functionality that is not transport specific. + """ + + def setUp(self): + super(TestDataSourceVMware, self).setUp() + self.tmp = self.tmp_dir() + + def test_no_data_access_method(self): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertFalse(ret) + + def test_get_host_info(self): + host_info = DataSourceVMware.get_host_info() + self.assertTrue(host_info) + self.assertTrue(host_info["hostname"]) + self.assertTrue(host_info["local-hostname"]) + self.assertTrue(host_info["local_hostname"]) + self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4]) + + +class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase): + """ + Test the envvar transport. 
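+
+    As an illustrative note (derived from
+    DataSourceVMware.get_guestinfo_envvar_key_name, not separately
+    documented): this transport is gated on the VMX_GUESTINFO environment
+    variable, and individual keys map to variables such as
+
+        VMX_GUESTINFO_METADATA            # guestinfo.metadata
+        VMX_GUESTINFO_METADATA_ENCODING   # guestinfo.metadata.encoding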
+ """ + + def setUp(self): + super(TestDataSourceVMwareEnvVars, self).setUp() + self.tmp = self.tmp_dir() + os.environ[DataSourceVMware.VMX_GUESTINFO] = "1" + self.create_system_files() + + def tearDown(self): + del os.environ[DataSourceVMware.VMX_GUESTINFO] + return super(TestDataSourceVMwareEnvVars, self).tearDown() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR, + DataSourceVMware.get_guestinfo_envvar_key_name("metadata"), + ), + ) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_only(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_base64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_single_ssh_key(self, m_fn): + metadata = 
DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + def test_ds_valid_on_vmware_platform(self): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, PRODUCT_NAME) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + DataSourceVMware.get_guestinfo_key_name("metadata"), + ), + ) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_single_ssh_key(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_base64(self, m_fn): + data = 
base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a non-VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_ds_invalid_on_non_vmware_platform(self, m_fn): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, None) + + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertFalse(ret) + + +def assert_metadata(test_obj, ds, metadata): + test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) + test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname()) + + expected_public_keys = metadata.get("public_keys") + if not isinstance(expected_public_keys, list): + expected_public_keys = [expected_public_keys] + + test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys()) + test_obj.assertIsInstance(ds.get_public_ssh_keys(), list) + + +def get_ds(temp_dir): + ds = DataSourceVMware.DataSourceVMware( + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir}) + ) + ds.vmware_rpctool = "vmware-rpctool" + return ds + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 1d8aaf18..8617d7bd 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -649,6 +649,50 @@ class TestDsIdentify(DsIdentifyBase): """EC2: bobrightbox.com in product_serial is not brightbox'""" self._test_ds_not_found('Ec2-E24Cloud-negative') + def test_vmware_no_valid_transports(self): + """VMware: no valid transports""" + self._test_ds_not_found('VMware-NoValidTransports') + + def test_vmware_envvar_no_data(self): + """VMware: envvar transport no data""" + self._test_ds_not_found('VMware-EnvVar-NoData') + + def test_vmware_envvar_no_virt_id(self): + """VMware: envvar transport success if no virt id""" + self._test_ds_found('VMware-EnvVar-NoVirtID') + + def test_vmware_envvar_activated_by_metadata(self): + """VMware: envvar transport 
activated by metadata""" + self._test_ds_found('VMware-EnvVar-Metadata') + + def test_vmware_envvar_activated_by_userdata(self): + """VMware: envvar transport activated by userdata""" + self._test_ds_found('VMware-EnvVar-Userdata') + + def test_vmware_envvar_activated_by_vendordata(self): + """VMware: envvar transport activated by vendordata""" + self._test_ds_found('VMware-EnvVar-Vendordata') + + def test_vmware_guestinfo_no_data(self): + """VMware: guestinfo transport no data""" + self._test_ds_not_found('VMware-GuestInfo-NoData') + + def test_vmware_guestinfo_no_virt_id(self): + """VMware: guestinfo transport fails if no virt id""" + self._test_ds_not_found('VMware-GuestInfo-NoVirtID') + + def test_vmware_guestinfo_activated_by_metadata(self): + """VMware: guestinfo transport activated by metadata""" + self._test_ds_found('VMware-GuestInfo-Metadata') + + def test_vmware_guestinfo_activated_by_userdata(self): + """VMware: guestinfo transport activated by userdata""" + self._test_ds_found('VMware-GuestInfo-Userdata') + + def test_vmware_guestinfo_activated_by_vendordata(self): + """VMware: guestinfo transport activated by vendordata""" + self._test_ds_found('VMware-GuestInfo-Vendordata') + class TestBSDNoSys(DsIdentifyBase): """Test *BSD code paths @@ -1136,7 +1180,240 @@ VALID_CFG = { 'Ec2-E24Cloud-negative': { 'ds': 'Ec2', 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, - } + }, + 'VMware-NoValidTransports': { + 'ds': 'VMware', + 'mocks': [ + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-EnvVar-NoData': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-EnvVar-NoVirtID': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 1, + }, + ], + }, + 'VMware-EnvVar-Metadata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-EnvVar-Userdata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-EnvVar-Vendordata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 0, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-GuestInfo-NoData': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', 
+ }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-GuestInfo-NoVirtID': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', + }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 0, + 'out': '---', + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 1, + }, + ], + }, + 'VMware-GuestInfo-Metadata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', + }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 0, + 'out': '---', + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-GuestInfo-Userdata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', + }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 0, + 'out': '---', + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-GuestInfo-Vendordata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', + }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 0, + 'out': '---', + }, + MOCK_VIRT_IS_VMWARE, + ], + }, } # vi: ts=4 expandtab diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 3c2c6d14..5089dd70 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -1,5 +1,6 @@ ader1990 ajmyyra +akutz AlexBaranowski Aman306 andrewbogott diff --git a/tools/ds-identify b/tools/ds-identify index 73e27c71..234ffa81 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -125,7 +125,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -1364,6 +1364,80 @@ dscheck_Vultr() { return $DS_NOT_FOUND } +vmware_has_envvar_vmx_guestinfo() { + [ -n "${VMX_GUESTINFO:-}" ] +} + +vmware_has_envvar_vmx_guestinfo_metadata() { + [ -n "${VMX_GUESTINFO_METADATA:-}" ] +} + +vmware_has_envvar_vmx_guestinfo_userdata() { + [ -n "${VMX_GUESTINFO_USERDATA:-}" ] +} + +vmware_has_envvar_vmx_guestinfo_vendordata() { + [ -n "${VMX_GUESTINFO_VENDORDATA:-}" ] +} + +vmware_has_rpctool() { + command -v vmware-rpctool >/dev/null 2>&1 +} + +vmware_rpctool_guestinfo_metadata() { + vmware-rpctool "info-get guestinfo.metadata" +} + +vmware_rpctool_guestinfo_userdata() { + vmware-rpctool "info-get guestinfo.userdata" +} + +vmware_rpctool_guestinfo_vendordata() { + vmware-rpctool "info-get guestinfo.vendordata" +} + +dscheck_VMware() { + # Checks to see if there is valid data for the VMware datasource. 
+ # The data transports are checked in the following order: + # + # * envvars + # * guestinfo + # + # Please note when updating this function with support for new data + # transports, the order should match the order in the _get_data + # function from the file DataSourceVMware.py. + + # Check to see if running in a container and the VMware + # datasource is configured via environment variables. + if vmware_has_envvar_vmx_guestinfo; then + if vmware_has_envvar_vmx_guestinfo_metadata || \ + vmware_has_envvar_vmx_guestinfo_userdata || \ + vmware_has_envvar_vmx_guestinfo_vendordata; then + return "${DS_FOUND}" + fi + fi + + # Do not proceed unless the detected platform is VMware. + if [ ! "${DI_VIRT}" = "vmware" ]; then + return "${DS_NOT_FOUND}" + fi + + # Do not proceed if the vmware-rpctool command is not present. + if ! vmware_has_rpctool; then + return "${DS_NOT_FOUND}" + fi + + # Activate the VMware datasource only if any of the fields used + # by the datasource are present in the guestinfo table. + if { vmware_rpctool_guestinfo_metadata || \ + vmware_rpctool_guestinfo_userdata || \ + vmware_rpctool_guestinfo_vendordata; } >/dev/null 2>&1; then + return "${DS_FOUND}" + fi + + return "${DS_NOT_FOUND}" +} + collect_info() { read_uname_info read_virt -- cgit v1.2.3 From 9893dfcd2f0be92197d707236cbd44cb7452364d Mon Sep 17 00:00:00 2001 From: Gabriel Nagy Date: Tue, 10 Aug 2021 18:14:23 +0300 Subject: cc_puppet: support AIO installations and more (#960) - update the puppet module to support AIO installations by setting `install_type` to `aio` - make the install collection configurable through the `collection` parameter; by default the rolling `puppet` collection will be used, which installs the latest version) - when `install_type` is `aio`, puppetlabs repos will be purged after installation; set `cleanup` to `False` to prevent this - AIO installations are performed by downloading and executing a shell script; the URL for this script can be overridden using the `aio_install_url` parameter - make it possible to run puppet agent after installation/configuration via the `exec` key - by default, puppet agent will run with the `--test` argument; this can be overridden via the `exec_args` key --- cloudinit/config/cc_puppet.py | 159 +++++++++++-- doc/examples/cloud-config-puppet.txt | 60 ++++- .../testcases/examples/setup_run_puppet.yaml | 10 +- .../unittests/test_handler/test_handler_puppet.py | 261 +++++++++++++++++++-- 4 files changed, 426 insertions(+), 64 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index bc981cf4..a0779eb0 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -29,22 +29,41 @@ The keys are ``package_name``, ``conf_file``, ``ssl_dir`` and ones that work with puppet 3.x and with distributions that ship modified puppet 4.x that uses the old paths. +Agent packages from the puppetlabs repositories can be installed by setting +``install_type`` to ``aio``. Based on this setting, the default config/SSL/CSR +paths will be adjusted accordingly. To maintain backwards compatibility this +setting defaults to ``packages`` which will install puppet from the distro +packages. + +If installing ``aio`` packages, ``collection`` can also be set to one of +``puppet`` (rolling release), ``puppet6``, ``puppet7`` (or their nightly +counterparts) in order to install specific release streams. 
By default, the +puppetlabs repository will be purged after installation finishes; set +``cleanup`` to ``false`` to prevent this. AIO packages are installed through a +shell script which is downloaded on the machine and then executed; the path to +this script can be overridden using the ``aio_install_url`` key. + Puppet configuration can be specified under the ``conf`` key. The configuration is specified as a dictionary containing high-level ``
`` keys and lists of ``=`` pairs within each section. Each section name and ``=`` pair is written directly to ``puppet.conf``. As -such, section names should be one of: ``main``, ``master``, ``agent`` or +such, section names should be one of: ``main``, ``server``, ``agent`` or ``user`` and keys should be valid puppet configuration options. The ``certname`` key supports string substitutions for ``%i`` and ``%f``, corresponding to the instance id and fqdn of the machine respectively. If ``ca_cert`` is present, it will not be written to ``puppet.conf``, but -instead will be used as the puppermaster certificate. It should be specified +instead will be used as the puppetserver certificate. It should be specified in pem format as a multi-line string (using the ``|`` yaml notation). -Additionally it's possible to create a csr_attributes.yaml for -CSR attributes and certificate extension requests. +Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR +attributes and certificate extension requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html +The puppet service will be automatically enabled after installation. A manual +run can also be triggered by setting ``exec`` to ``true``, and additional +arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by +default the agent will execute with the ``--test`` flag). + **Internal name:** ``cc_puppet`` **Module frequency:** per instance @@ -56,13 +75,19 @@ See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html puppet: install: version: + collection: + install_type: + aio_install_url: 'https://git.io/JBhoQ' + cleanup: conf_file: '/etc/puppet/puppet.conf' ssl_dir: '/var/lib/puppet/ssl' csr_attributes_path: '/etc/puppet/csr_attributes.yaml' package_name: 'puppet' + exec: + exec_args: ['--test'] conf: agent: - server: "puppetmaster.example.org" + server: "puppetserver.example.org" certname: "%i.%f" ca_cert: | -------BEGIN CERTIFICATE------- @@ -84,12 +109,12 @@ from io import StringIO from cloudinit import helpers from cloudinit import subp +from cloudinit import temp_utils from cloudinit import util +from cloudinit import url_helper -PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' -PUPPET_SSL_DIR = '/var/lib/puppet/ssl' -PUPPET_CSR_ATTRIBUTES_PATH = '/etc/puppet/csr_attributes.yaml' -PUPPET_PACKAGE_NAME = 'puppet' +AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501 +PUPPET_AGENT_DEFAULT_ARGS = ['--test'] class PuppetConstants(object): @@ -119,6 +144,43 @@ def _autostart_puppet(log): " puppet services on this system")) +def get_config_value(puppet_bin, setting): + """Get the config value for a given setting using `puppet config print` + :param puppet_bin: path to puppet binary + :param setting: setting to query + """ + out, _ = subp.subp([puppet_bin, 'config', 'print', setting]) + return out.rstrip() + + +def install_puppet_aio(url=AIO_INSTALL_URL, version=None, + collection=None, cleanup=True): + """Install puppet-agent from the puppetlabs repositories using the one-shot + shell script + + :param url: URL from where to download the install script + :param version: version to install, blank defaults to latest + :param collection: collection to install, blank defaults to latest + :param cleanup: whether to purge the puppetlabs repo after installation + """ + args = [] + if version is not None: + args = ['-v', version] + if collection is not None: + args += ['-c', collection] + + # Purge puppetlabs repos after installation 
+ if cleanup: + args += ['--cleanup'] + content = url_helper.readurl(url=url, retries=5).contents + + # Use tmpdir over tmpfile to avoid 'text file busy' on execute + with temp_utils.tempdir(needs_exe=True) as tmpd: + tmpf = os.path.join(tmpd, 'puppet-install') + util.write_file(tmpf, content, mode=0o700) + return subp.subp([tmpf] + args, capture=False) + + def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything if 'puppet' not in cfg: @@ -130,23 +192,50 @@ def handle(name, cfg, cloud, log, _args): # Start by installing the puppet package if necessary... install = util.get_cfg_option_bool(puppet_cfg, 'install', True) version = util.get_cfg_option_str(puppet_cfg, 'version', None) - package_name = util.get_cfg_option_str( - puppet_cfg, 'package_name', PUPPET_PACKAGE_NAME) - conf_file = util.get_cfg_option_str( - puppet_cfg, 'conf_file', PUPPET_CONF_PATH) - ssl_dir = util.get_cfg_option_str(puppet_cfg, 'ssl_dir', PUPPET_SSL_DIR) - csr_attributes_path = util.get_cfg_option_str( - puppet_cfg, 'csr_attributes_path', PUPPET_CSR_ATTRIBUTES_PATH) + collection = util.get_cfg_option_str(puppet_cfg, 'collection', None) + install_type = util.get_cfg_option_str( + puppet_cfg, 'install_type', 'packages') + cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True) + run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False) + aio_install_url = util.get_cfg_option_str( + puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL) - p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) + # AIO and distro packages use different paths + if install_type == 'aio': + puppet_user = 'root' + puppet_bin = '/opt/puppetlabs/bin/puppet' + puppet_package = 'puppet-agent' + else: # default to 'packages' + puppet_user = 'puppet' + puppet_bin = 'puppet' + puppet_package = 'puppet' + + package_name = util.get_cfg_option_str( + puppet_cfg, 'package_name', puppet_package) if not install and version: - log.warning(("Puppet install set false but version supplied," + log.warning(("Puppet install set to false but version supplied," " doing nothing.")) elif install: - log.debug(("Attempting to install puppet %s,"), - version if version else 'latest') + log.debug(("Attempting to install puppet %s from %s"), + version if version else 'latest', install_type) - cloud.distro.install_packages((package_name, version)) + if install_type == "packages": + cloud.distro.install_packages((package_name, version)) + elif install_type == "aio": + install_puppet_aio(aio_install_url, version, collection, cleanup) + else: + log.warning("Unknown puppet install type '%s'", install_type) + run = False + + conf_file = util.get_cfg_option_str( + puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config')) + ssl_dir = util.get_cfg_option_str( + puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir')) + csr_attributes_path = util.get_cfg_option_str( + puppet_cfg, 'csr_attributes_path', + get_config_value(puppet_bin, 'csr_attributes')) + + p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) # ... 
and then update the puppet configuration if 'conf' in puppet_cfg: @@ -165,17 +254,18 @@ def handle(name, cfg, cloud, log, _args): source=p_constants.conf_path) for (cfg_name, cfg) in puppet_cfg['conf'].items(): # Cert configuration is a special case - # Dump the puppet master ca certificate in the correct place + # Dump the puppetserver ca certificate in the correct place if cfg_name == 'ca_cert': # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership util.ensure_dir(p_constants.ssl_dir, 0o771) - util.chownbyname(p_constants.ssl_dir, 'puppet', 'root') + util.chownbyname(p_constants.ssl_dir, puppet_user, 'root') util.ensure_dir(p_constants.ssl_cert_dir) - util.chownbyname(p_constants.ssl_cert_dir, 'puppet', 'root') + util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root') util.write_file(p_constants.ssl_cert_path, cfg) - util.chownbyname(p_constants.ssl_cert_path, 'puppet', 'root') + util.chownbyname(p_constants.ssl_cert_path, + puppet_user, 'root') else: # Iterate through the config items, we'll use ConfigParser.set # to overwrite or create new items as needed @@ -203,6 +293,25 @@ def handle(name, cfg, cloud, log, _args): # Set it up so it autostarts _autostart_puppet(log) + # Run the agent if needed + if run: + log.debug('Running puppet-agent') + cmd = [puppet_bin, 'agent'] + if 'exec_args' in puppet_cfg: + cmd_args = puppet_cfg['exec_args'] + if isinstance(cmd_args, (list, tuple)): + cmd.extend(cmd_args) + elif isinstance(cmd_args, str): + cmd.extend(cmd_args.split()) + else: + log.warning("Unknown type %s provided for puppet" + " 'exec_args' expected list, tuple," + " or string", type(cmd_args)) + cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) + else: + cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) + subp.subp(cmd, capture=False) + # Start puppetd subp.subp(['service', 'puppet', 'start'], capture=False) diff --git a/doc/examples/cloud-config-puppet.txt b/doc/examples/cloud-config-puppet.txt index 3c7e2da7..c6bc15de 100644 --- a/doc/examples/cloud-config-puppet.txt +++ b/doc/examples/cloud-config-puppet.txt @@ -1,25 +1,65 @@ #cloud-config # -# This is an example file to automatically setup and run puppetd +# This is an example file to automatically setup and run puppet # when the instance boots for the first time. # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. 
puppet: + # Boolean: whether or not to install puppet (default: true) + install: true + + # A specific version to pass to the installer script or package manager + version: "7.7.0" + + # Valid values are 'packages' and 'aio' (default: 'packages') + install_type: "packages" + + # Puppet collection to install if 'install_type' is 'aio' + collection: "puppet7" + + # Boolean: whether or not to remove the puppetlabs repo after installation + # if 'install_type' is 'aio' (default: true) + cleanup: true + + # If 'install_type' is 'aio', change the url to the install script + aio_install_url: "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" + + # Path to the puppet config file (default: depends on 'install_type') + conf_file: "/etc/puppet/puppet.conf" + + # Path to the puppet SSL directory (default: depends on 'install_type') + ssl_dir: "/var/lib/puppet/ssl" + + # Path to the CSR attributes file (default: depends on 'install_type') + csr_attributes_path: "/etc/puppet/csr_attributes.yaml" + + # The name of the puppet package to install (no-op if 'install_type' is 'aio') + package_name: "puppet" + + # Boolean: whether or not to run puppet after configuration finishes + # (default: false) + exec: false + + # A list of arguments to pass to 'puppet agent' if 'exec' is true + # (default: ['--test']) + exec_args: ['--test'] + # Every key present in the conf object will be added to puppet.conf: # [name] # subkey=value # # For example the configuration below will have the following section # added to puppet.conf: - # [puppetd] - # server=puppetmaster.example.org + # [main] + # server=puppetserver.example.org # certname=i-0123456.ip-X-Y-Z.cloud.internal # - # The puppmaster ca certificate will be available in - # /var/lib/puppet/ssl/certs/ca.pem + # The puppetserver ca certificate will be available in + # /var/lib/puppet/ssl/certs/ca.pem if using distro packages + # or /etc/puppetlabs/puppet/ssl/certs/ca.pem if using AIO packages. conf: agent: - server: "puppetmaster.example.org" + server: "puppetserver.example.org" # certname supports substitutions at runtime: # %i: instanceid # Example: i-0123456 @@ -29,11 +69,13 @@ puppet: # NB: the certname will automatically be lowercased as required by puppet certname: "%i.%f" # ca_cert is a special case. It won't be added to puppet.conf. - # It holds the puppetmaster certificate in pem format. + # It holds the puppetserver certificate in pem format. # It should be a multi-line string (using the | yaml notation for # multi-line strings). - # The puppetmaster certificate is located in - # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host. + # The puppetserver certificate is located in + # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host if using + # distro packages or /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem if using AIO + # packages. 
# ca_cert: | -----BEGIN CERTIFICATE----- diff --git a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml index e366c042..cdb1c28d 100644 --- a/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml +++ b/tests/cloud_tests/testcases/examples/setup_run_puppet.yaml @@ -14,14 +14,14 @@ cloud_config: | # For example the configuration below will have the following section # added to puppet.conf: # [puppetd] - # server=puppetmaster.example.org + # server=puppetserver.example.org # certname=i-0123456.ip-X-Y-Z.cloud.internal # # The puppmaster ca certificate will be available in # /var/lib/puppet/ssl/certs/ca.pem conf: agent: - server: "puppetmaster.example.org" + server: "puppetserver.example.org" # certname supports substitutions at runtime: # %i: instanceid # Example: i-0123456 @@ -31,11 +31,11 @@ cloud_config: | # NB: the certname will automatically be lowercased as required by puppet certname: "%i.%f" # ca_cert is a special case. It won't be added to puppet.conf. - # It holds the puppetmaster certificate in pem format. + # It holds the puppetserver certificate in pem format. # It should be a multi-line string (using the | yaml notation for # multi-line strings). - # The puppetmaster certificate is located in - # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host. + # The puppetserver certificate is located in + # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetserver host. # ca_cert: | -----BEGIN CERTIFICATE----- diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py index 62388ac6..c0ba2e3c 100644 --- a/tests/unittests/test_handler/test_handler_puppet.py +++ b/tests/unittests/test_handler/test_handler_puppet.py @@ -3,8 +3,9 @@ from cloudinit.config import cc_puppet from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) -from cloudinit.tests.helpers import CiTestCase, mock +from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase, mock +import httpretty import logging import textwrap @@ -63,7 +64,8 @@ class TestPuppetHandle(CiTestCase): super(TestPuppetHandle, self).setUp() self.new_root = self.tmp_dir() self.conf = self.tmp_path('puppet.conf') - self.csr_attributes_path = self.tmp_path('csr_attributes.yaml') + self.csr_attributes_path = self.tmp_path( + 'csr_attributes.yaml') def _get_cloud(self, distro): paths = helpers.Paths({'templates_dir': self.new_root}) @@ -72,7 +74,7 @@ class TestPuppetHandle(CiTestCase): myds = DataSourceNone.DataSourceNone({}, mydist, paths) return cloud.Cloud(myds, paths, {}, mydist, None) - def test_handler_skips_missing_puppet_key_in_cloudconfig(self, m_auto): + def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto): """Cloud-config containing no 'puppet' key is skipped.""" mycloud = self._get_cloud('ubuntu') cfg = {} @@ -81,19 +83,19 @@ class TestPuppetHandle(CiTestCase): "no 'puppet' configuration found", self.logs.getvalue()) self.assertEqual(0, m_auto.call_count) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_config_starts_puppet_service(self, m_subp, m_auto): + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_starts_puppet_service(self, m_subp, m_auto): """Cloud-config 'puppet' configuration starts puppet.""" mycloud = self._get_cloud('ubuntu') cfg = {'puppet': {'install': False}} cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) self.assertEqual(1, 
m_auto.call_count) - self.assertEqual( + self.assertIn( [mock.call(['service', 'puppet', 'start'], capture=False)], m_subp.call_args_list) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_empty_puppet_config_installs_puppet(self, m_subp, m_auto): + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto): """Cloud-config empty 'puppet' configuration installs latest puppet.""" mycloud = self._get_cloud('ubuntu') mycloud.distro = mock.MagicMock() @@ -103,8 +105,8 @@ class TestPuppetHandle(CiTestCase): [mock.call(('puppet', None))], mycloud.distro.install_packages.call_args_list) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_config_installs_puppet_on_true(self, m_subp, _): + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_on_true(self, m_subp, _): """Cloud-config with 'puppet' key installs when 'install' is True.""" mycloud = self._get_cloud('ubuntu') mycloud.distro = mock.MagicMock() @@ -114,8 +116,85 @@ class TestPuppetHandle(CiTestCase): [mock.call(('puppet', None))], mycloud.distro.install_packages.call_args_list) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_config_installs_puppet_version(self, m_subp, _): + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _): + """Cloud-config with 'puppet' key installs + when 'install_type' is 'aio'.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': {'install': True, 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + cc_puppet.AIO_INSTALL_URL, + None, None, True) + + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio_with_version(self, + m_subp, m_aio, _): + """Cloud-config with 'puppet' key installs + when 'install_type' is 'aio' and 'version' is specified.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': {'install': True, + 'version': '6.24.0', 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + cc_puppet.AIO_INSTALL_URL, + '6.24.0', None, True) + + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio_with_collection(self, + m_subp, + m_aio, _): + """Cloud-config with 'puppet' key installs + when 'install_type' is 'aio' and 'collection' is specified.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': {'install': True, + 'collection': 'puppet6', 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + cc_puppet.AIO_INSTALL_URL, + None, 'puppet6', True) + + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio_with_custom_url(self, + m_subp, + m_aio, _): + """Cloud-config with 'puppet' key installs + 
when 'install_type' is 'aio' and 'aio_install_url' is specified.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': + {'install': True, + 'aio_install_url': 'http://test.url/path/to/script.sh', + 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + 'http://test.url/path/to/script.sh', None, None, True) + + @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_aio_without_cleanup(self, + m_subp, + m_aio, _): + """Cloud-config with 'puppet' key installs + when 'install_type' is 'aio' and no cleanup.""" + mycloud = self._get_cloud('ubuntu') + mycloud.distro = mock.MagicMock() + cfg = {'puppet': {'install': True, + 'cleanup': False, 'install_type': 'aio'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + m_aio.assert_called_with( + cc_puppet.AIO_INSTALL_URL, + None, None, False) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_installs_puppet_version(self, m_subp, _): """Cloud-config 'puppet' configuration can specify a version.""" mycloud = self._get_cloud('ubuntu') mycloud.distro = mock.MagicMock() @@ -125,26 +204,39 @@ class TestPuppetHandle(CiTestCase): [mock.call(('puppet', '3.8'))], mycloud.distro.install_packages.call_args_list) - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_config_updates_puppet_conf(self, m_subp, m_auto): + @mock.patch('cloudinit.config.cc_puppet.get_config_value') + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_config_updates_puppet_conf(self, + m_subp, m_default, m_auto): """When 'conf' is provided update values in PUPPET_CONF_PATH.""" + + def _fake_get_config_value(puppet_bin, setting): + return self.conf + + m_default.side_effect = _fake_get_config_value mycloud = self._get_cloud('ubuntu') cfg = { 'puppet': { - 'conf': {'agent': {'server': 'puppetmaster.example.org'}}}} - util.write_file(self.conf, '[agent]\nserver = origpuppet\nother = 3') - puppet_conf_path = 'cloudinit.config.cc_puppet.PUPPET_CONF_PATH' + 'conf': {'agent': {'server': 'puppetserver.example.org'}}}} + util.write_file( + self.conf, '[agent]\nserver = origpuppet\nother = 3') mycloud.distro = mock.MagicMock() - with mock.patch(puppet_conf_path, self.conf): - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) content = util.load_file(self.conf) - expected = '[agent]\nserver = puppetmaster.example.org\nother = 3\n\n' + expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n' self.assertEqual(expected, content) + @mock.patch('cloudinit.config.cc_puppet.get_config_value') @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_handler_puppet_writes_csr_attributes_file(self, m_subp, m_auto): + def test_puppet_writes_csr_attributes_file(self, + m_subp, m_default, m_auto): """When csr_attributes is provided creates file in PUPPET_CSR_ATTRIBUTES_PATH.""" + + def _fake_get_config_value(puppet_bin, setting): + return self.csr_attributes_path + + m_default.side_effect = _fake_get_config_value mycloud = self._get_cloud('ubuntu') mycloud.distro = mock.MagicMock() cfg = { @@ -163,10 +255,7 @@ class TestPuppetHandle(CiTestCase): } } } - csr_attributes = 'cloudinit.config.cc_puppet.' 
\ - 'PUPPET_CSR_ATTRIBUTES_PATH' - with mock.patch(csr_attributes, self.csr_attributes_path): - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) content = util.load_file(self.csr_attributes_path) expected = textwrap.dedent("""\ custom_attributes: @@ -177,3 +266,125 @@ class TestPuppetHandle(CiTestCase): pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E """) self.assertEqual(expected, content) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto): + """Run puppet with default args if 'exec' is set to True.""" + mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'exec': True}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + self.assertEqual(1, m_auto.call_count) + self.assertIn( + [mock.call(['puppet', 'agent', '--test'], capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_runs_puppet_with_args_list_if_requested(self, + m_subp, m_auto): + """Run puppet with 'exec_args' list if 'exec' is set to True.""" + mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'exec': True, 'exec_args': [ + '--onetime', '--detailed-exitcodes']}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + self.assertEqual(1, m_auto.call_count) + self.assertIn( + [mock.call( + ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], + capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_runs_puppet_with_args_string_if_requested(self, + m_subp, m_auto): + """Run puppet with 'exec_args' string if 'exec' is set to True.""" + mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'exec': True, + 'exec_args': '--onetime --detailed-exitcodes'}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + self.assertEqual(1, m_auto.call_count) + self.assertIn( + [mock.call( + ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], + capture=False)], + m_subp.call_args_list) + + +class TestInstallPuppetAio(HttprettyTestCase): + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_default_arguments(self, m_subp): + """Install AIO with no arguments""" + response = b'#!/bin/bash\necho "Hi Mom"' + httpretty.register_uri( + httpretty.GET, cc_puppet.AIO_INSTALL_URL, + body=response, status=200) + + cc_puppet.install_puppet_aio() + + self.assertEqual( + [mock.call([mock.ANY, '--cleanup'], capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_custom_url(self, m_subp): + """Install AIO from custom URL""" + response = b'#!/bin/bash\necho "Hi Mom"' + url = 'http://custom.url/path/to/script.sh' + httpretty.register_uri( + httpretty.GET, url, body=response, status=200) + + cc_puppet.install_puppet_aio('http://custom.url/path/to/script.sh') + + self.assertEqual( + [mock.call([mock.ANY, '--cleanup'], capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_version(self, m_subp): + """Install AIO with specific version""" + response = b'#!/bin/bash\necho "Hi Mom"' + httpretty.register_uri( + httpretty.GET, cc_puppet.AIO_INSTALL_URL, + body=response, status=200) + + cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, '7.6.0') + + 
self.assertEqual( + [mock.call([mock.ANY, '-v', '7.6.0', '--cleanup'], capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_collection(self, m_subp): + """Install AIO with specific collection""" + response = b'#!/bin/bash\necho "Hi Mom"' + httpretty.register_uri( + httpretty.GET, cc_puppet.AIO_INSTALL_URL, + body=response, status=200) + + cc_puppet.install_puppet_aio( + cc_puppet.AIO_INSTALL_URL, None, 'puppet6-nightly') + + self.assertEqual( + [mock.call([mock.ANY, '-c', 'puppet6-nightly', '--cleanup'], + capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', + return_value=(None, None)) + def test_install_with_no_cleanup(self, m_subp): + """Install AIO with no cleanup""" + response = b'#!/bin/bash\necho "Hi Mom"' + httpretty.register_uri( + httpretty.GET, cc_puppet.AIO_INSTALL_URL, + body=response, status=200) + + cc_puppet.install_puppet_aio( + cc_puppet.AIO_INSTALL_URL, None, None, False) + + self.assertEqual( + [mock.call([mock.ANY], capture=False)], + m_subp.call_args_list) -- cgit v1.2.3 From c62cb3af59abc464380011c106b31879181e7c45 Mon Sep 17 00:00:00 2001 From: Andrew Kutz <101085+akutz@users.noreply.github.com> Date: Tue, 10 Aug 2021 13:18:56 -0500 Subject: Update inconsistent indentation (#962) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch updates some indentation in a comment that prevented an attempt to run the Black formatter (https://github.com/psf/black) against the cloud-init codebase: $ find cloudinit -name '*.py' -type f | xargs black -l 79 --check ... Oh no! 💥 💔 💥 262 files would be reformatted, 19 files would be left unchanged, 1 file would fail to reformat. The one file that fails to format is cloudinit/net/__init__.py. With this fix in place, the black command can successfully parse the file into AST and back again: $ black -l 79 --check cloudinit/net/__init__.py would reformat cloudinit/net/__init__.py Oh no! 💥 💔 💥 1 file would be reformatted. Normally this patch would be part of such an overall effort, but since this is the only location that interrupted running the black command, this author felt it was worth addressing this discrepancy sooner than later in the case there is subsequent desire to use a standard format tool such as black. 
--- cloudinit/net/__init__.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index b827d41a..655558a1 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -313,11 +313,11 @@ def is_netfail_standby(devname, driver=None): def is_renamed(devname): """ /* interface name assignment types (sysfs name_assign_type attribute) */ - #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */ - #define NET_NAME_ENUM 1 /* enumerated by kernel */ - #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */ - #define NET_NAME_USER 3 /* provided by user-space */ - #define NET_NAME_RENAMED 4 /* renamed by user-space */ + #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */ + #define NET_NAME_ENUM 1 /* enumerated by kernel */ + #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */ + #define NET_NAME_USER 3 /* provided by user-space */ + #define NET_NAME_RENAMED 4 /* renamed by user-space */ """ name_assign_type = read_sys_net_safe(devname, 'name_assign_type') if name_assign_type and name_assign_type in ['3', '4']: -- cgit v1.2.3 From d3271217e2745fb0e3405bd093b61c39fe0708a7 Mon Sep 17 00:00:00 2001 From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com> Date: Tue, 10 Aug 2021 12:28:00 -0700 Subject: Azure: Limit polling network metadata on connection errors (#961) --- cloudinit/sources/DataSourceAzure.py | 27 +++++++++++++++++---------- tests/unittests/test_datasource/test_azure.py | 10 ++++++++-- 2 files changed, 25 insertions(+), 12 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 01e2c959..6df9934b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -972,7 +972,7 @@ class DataSourceAzure(sources.DataSource): imds_md = None metadata_poll_count = 0 metadata_logging_threshold = 1 - metadata_timeout_count = 0 + expected_errors_count = 0 # For now, only a VM's primary NIC can contact IMDS and WireServer. If # DHCP fails for a NIC, we have no mechanism to determine if the NIC is @@ -998,13 +998,16 @@ class DataSourceAzure(sources.DataSource): raise # Retry polling network metadata for a limited duration only when the - # calls fail due to timeout. This is because the platform drops packets - # going towards IMDS when it is not a primary nic. If the calls fail - # due to other issues like 410, 503 etc, then it means we are primary - # but IMDS service is unavailable at the moment. Retry indefinitely in - # those cases since we cannot move on without the network metadata. + # calls fail due to network unreachable error or timeout. + # This is because the platform drops packets going towards IMDS + # when it is not a primary nic. If the calls fail due to other issues + # like 410, 503 etc, then it means we are primary but IMDS service + # is unavailable at the moment. Retry indefinitely in those cases + # since we cannot move on without the network metadata. In the future, + # all this will not be necessary, as a new dhcp option would tell + # whether the nic is primary or not. 
def network_metadata_exc_cb(msg, exc): - nonlocal metadata_timeout_count, metadata_poll_count + nonlocal expected_errors_count, metadata_poll_count nonlocal metadata_logging_threshold metadata_poll_count = metadata_poll_count + 1 @@ -1024,9 +1027,13 @@ class DataSourceAzure(sources.DataSource): (msg, exc.cause, exc.code), logger_func=LOG.error) - if exc.cause and isinstance(exc.cause, requests.Timeout): - metadata_timeout_count = metadata_timeout_count + 1 - return (metadata_timeout_count <= 10) + # Retry up to a certain limit for both timeout and network + # unreachable errors. + if exc.cause and isinstance( + exc.cause, (requests.Timeout, requests.ConnectionError) + ): + expected_errors_count = expected_errors_count + 1 + return (expected_errors_count <= 10) return True # Primary nic detection will be optimized in the future. The fact that diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3bf8fdb2..63eaf384 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -2825,7 +2825,8 @@ class TestPreprovisioningHotAttachNics(CiTestCase): @mock.patch(MOCKPATH + 'EphemeralDHCPv4') def test_check_if_nic_is_primary_retries_on_failures( self, m_dhcpv4, m_imds): - """Retry polling for network metadata on all failures except timeout""" + """Retry polling for network metadata on all failures except timeout + and network unreachable errors""" dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) lease = { 'interface': 'eth9', 'fixed-address': '192.168.2.9', @@ -2854,8 +2855,13 @@ class TestPreprovisioningHotAttachNics(CiTestCase): error = url_helper.UrlError(cause=cause, code=410) eth0Retries.append(exc_cb("No goal state.", error)) else: - cause = requests.Timeout('Fake connection timeout') for _ in range(0, 10): + # We are expected to retry for a certain period for both + # timeout errors and network unreachable errors. + if _ < 5: + cause = requests.Timeout('Fake connection timeout') + else: + cause = requests.ConnectionError('Network Unreachable') error = url_helper.UrlError(cause=cause) eth1Retries.append(exc_cb("Connection timeout", error)) # Should stop retrying after 10 retries -- cgit v1.2.3 From 82a30e6d821dbaec90bed066e09613bfb28fd52a Mon Sep 17 00:00:00 2001 From: Moustafa Moustafa Date: Thu, 12 Aug 2021 10:13:13 -0700 Subject: Azure: Logging the detected interfaces (#968) --- cloudinit/net/__init__.py | 2 ++ 1 file changed, 2 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 655558a1..017c50c5 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -661,6 +661,8 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True, cur['name'] = name cur_info[name] = cur + LOG.debug("Detected interfaces %s", cur_info) + def update_byname(bymac): return dict((data['name'], data) for data in cur_info.values()) -- cgit v1.2.3 From e119ceceb7d76af7d75c04a8779b9c5fc68083a8 Mon Sep 17 00:00:00 2001 From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com> Date: Thu, 12 Aug 2021 12:44:53 -0700 Subject: Azure: Check if interface is up after sleep when trying to bring it up (#972) When bringing interface up by unbinding and then binding hv_netvsc driver, it might take a short delay after binding for the link to be up. So before trying unbind/bind again after sleep, check if the link is up. This is a corner case when a preprovisioned VM is reused and the NICs are hot-attached. 
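The unbind/bind retry pattern this commit describes can be reduced to a short
sketch. This is an illustration only, not the actual DataSourceAzure code:
`is_up` and `unbind_bind` stand in for the distro networking helpers, and the
attempt and sleep values are placeholders.

    import time

    def bring_up_link(ifname, is_up, unbind_bind,
                      max_attempts=10, sleep_duration=1):
        # Illustrative sketch: re-check the link state after each sleep so
        # a link that settles slowly after bind is not needlessly rebound.
        for _attempt in range(max_attempts):
            if is_up(ifname):
                return True
            unbind_bind(ifname)        # e.g. rebind the hv_netvsc driver
            time.sleep(sleep_duration)
            # The link may come up a moment after bind; check again before
            # looping so we do not tear down a link that just finished
            # coming up.
            if is_up(ifname):
                return True
        return False

The post-sleep check in this sketch corresponds to the early-return hunk the
patch below adds to wait_for_link_up.
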
--- cloudinit/sources/DataSourceAzure.py | 10 ++++++++++ tests/unittests/test_datasource/test_azure.py | 19 +++++++++++++++++++ 2 files changed, 29 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6df9934b..ba23139b 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -923,6 +923,16 @@ class DataSourceAzure(sources.DataSource): sleep(sleep_duration) + # Since we just did a unbind and bind, check again after sleep + # but before doing unbind and bind again to avoid races where the + # link might take a slight delay after bind to be up. + if self.distro.networking.is_up(ifname): + msg = ("Link is up after checking after sleeping for %d secs" + " after %d attempts" % + (sleep_duration, attempts)) + report_diagnostic_event(msg, logger_func=LOG.info) + return + @azure_ds_telemetry_reporter def _create_report_ready_marker(self): path = REPORTED_READY_MARKER_FILE diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 63eaf384..03609c3d 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -2907,6 +2907,25 @@ class TestPreprovisioningHotAttachNics(CiTestCase): dsa.wait_for_link_up("eth0") self.assertEqual(1, m_is_link_up.call_count) + @mock.patch(MOCKPATH + 'net.is_up', autospec=True) + @mock.patch(MOCKPATH + 'util.write_file') + @mock.patch('cloudinit.net.read_sys_net') + @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up') + def test_wait_for_link_up_checks_link_after_sleep( + self, m_is_link_up, m_read_sys_net, m_writefile, m_is_up): + """Waiting for link to be up should return immediately if the link is + already up.""" + + distro_cls = distros.fetch('ubuntu') + distro = distro_cls('ubuntu', {}, self.paths) + dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths) + m_is_link_up.return_value = False + m_is_up.return_value = True + + dsa.wait_for_link_up("eth0") + self.assertEqual(2, m_is_link_up.call_count) + self.assertEqual(1, m_is_up.call_count) + @mock.patch(MOCKPATH + 'util.write_file') @mock.patch('cloudinit.net.read_sys_net') @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up') -- cgit v1.2.3 From 04047438d7bbca0367f1b0722c0959fd92166e3e Mon Sep 17 00:00:00 2001 From: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com> Date: Fri, 13 Aug 2021 09:08:53 +0530 Subject: cc_resolv_conf: fix typos (#969) Add tests for cc_resolv_conf handler --- cloudinit/config/cc_resolv_conf.py | 7 +- cloudinit/net/networkd.py | 4 +- .../test_handler/test_handler_resolv_conf.py | 105 +++++++++++++++++++++ 3 files changed, 111 insertions(+), 5 deletions(-) create mode 100644 tests/unittests/test_handler/test_handler_resolv_conf.py (limited to 'cloudinit') diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index c51967e2..648935e4 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -108,18 +108,19 @@ def handle(name, cfg, cloud, log, _args): if "resolv_conf" not in cfg: log.warning("manage_resolv_conf True but no parameters provided!") + return try: template_fn = cloud.get_template_filename( - RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolv_conf_fn]) + RESOLVE_CONFIG_TEMPLATE_MAP[cloud.distro.resolve_conf_fn]) except KeyError: - log.warning("No template found, not rendering /etc/resolv.conf") + log.warning("No template found, not 
rendering resolve configs") return generate_resolv_conf( template_fn=template_fn, params=cfg["resolv_conf"], - target_fname=cloud.disro.resolve_conf_fn + target_fname=cloud.distro.resolve_conf_fn ) return diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py index 63e3a07f..a311572f 100644 --- a/cloudinit/net/networkd.py +++ b/cloudinit/net/networkd.py @@ -72,8 +72,8 @@ class Renderer(renderer.Renderer): def __init__(self, config=None): if not config: config = {} - self.resolved_conf = config.get('resolved_conf_fn', - '/etc/systemd/resolved.conf') + self.resolve_conf_fn = config.get('resolve_conf_fn', + '/etc/systemd/resolved.conf') self.network_conf_dir = config.get('network_conf_dir', '/etc/systemd/network/') diff --git a/tests/unittests/test_handler/test_handler_resolv_conf.py b/tests/unittests/test_handler/test_handler_resolv_conf.py new file mode 100644 index 00000000..96139001 --- /dev/null +++ b/tests/unittests/test_handler/test_handler_resolv_conf.py @@ -0,0 +1,105 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.config import cc_resolv_conf + +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers +from cloudinit import util +from copy import deepcopy + +from cloudinit.tests import helpers as t_help + +import logging +import os +import shutil +import tempfile +from unittest import mock + +LOG = logging.getLogger(__name__) + + +class TestResolvConf(t_help.FilesystemMockingTestCase): + with_logs = True + cfg = {'manage_resolv_conf': True, 'resolv_conf': {}} + + def setUp(self): + super(TestResolvConf, self).setUp() + self.tmp = tempfile.mkdtemp() + util.ensure_dir(os.path.join(self.tmp, 'data')) + self.addCleanup(shutil.rmtree, self.tmp) + + def _fetch_distro(self, kind, conf=None): + cls = distros.fetch(kind) + paths = helpers.Paths({'cloud_dir': self.tmp}) + conf = {} if conf is None else conf + return cls(kind, conf, paths) + + def call_resolv_conf_handler(self, distro_name, conf, cc=None): + if not cc: + ds = None + distro = self._fetch_distro(distro_name, conf) + paths = helpers.Paths({'cloud_dir': self.tmp}) + cc = cloud.Cloud(ds, paths, {}, distro, None) + cc_resolv_conf.handle('cc_resolv_conf', conf, cc, LOG, []) + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_systemd_resolved(self, m_render_to_file): + self.call_resolv_conf_handler('photon', self.cfg) + + assert [ + mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) + ] == m_render_to_file.call_args_list + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_no_param(self, m_render_to_file): + tmp = deepcopy(self.cfg) + self.logs.truncate(0) + tmp.pop('resolv_conf') + self.call_resolv_conf_handler('photon', tmp) + + self.assertIn('manage_resolv_conf True but no parameters provided', + self.logs.getvalue()) + assert [ + mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) + ] not in m_render_to_file.call_args_list + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_manage_resolv_conf_false(self, m_render_to_file): + tmp = deepcopy(self.cfg) + self.logs.truncate(0) + tmp['manage_resolv_conf'] = False + self.call_resolv_conf_handler('photon', tmp) + self.assertIn("'manage_resolv_conf' present but set to False", + self.logs.getvalue()) + assert [ + mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) + ] not in m_render_to_file.call_args_list + + 
@mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_etc_resolv_conf(self, m_render_to_file): + self.call_resolv_conf_handler('rhel', self.cfg) + + assert [ + mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) + ] == m_render_to_file.call_args_list + + @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") + def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file): + ds = None + distro = self._fetch_distro('rhel', self.cfg) + paths = helpers.Paths({'cloud_dir': self.tmp}) + cc = cloud.Cloud(ds, paths, {}, distro, None) + cc.distro.resolve_conf_fn = 'bla' + + self.logs.truncate(0) + self.call_resolv_conf_handler('rhel', self.cfg, cc) + + self.assertIn('No template found, not rendering resolve configs', + self.logs.getvalue()) + + assert [ + mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) + ] not in m_render_to_file.call_args_list + +# vi: ts=4 expandtab -- cgit v1.2.3 From 65607405aed2fb5e7797bb181dc947025c10f346 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 13 Aug 2021 15:34:16 -0500 Subject: Only invoke hotplug socket when functionality is enabled (#952) Alters hotplug hook to have a query mechanism checking if the functionality is enabled. This allows us to avoid using the hotplug socket and service when hotplug is disabled. --- cloudinit/cmd/devel/hotplug_hook.py | 123 ++++++++++++++++-------- cloudinit/sources/__init__.py | 18 ++-- tests/integration_tests/modules/test_hotplug.py | 14 ++- tools/hook-hotplug | 9 +- 4 files changed, 112 insertions(+), 52 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py index 0282f24a..a0058f03 100644 --- a/cloudinit/cmd/devel/hotplug_hook.py +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -3,6 +3,7 @@ import abc import argparse import os +import sys import time from cloudinit import log @@ -12,7 +13,7 @@ from cloudinit.net import activators, read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data from cloudinit.reporting import events from cloudinit.stages import Init -from cloudinit.sources import DataSource +from cloudinit.sources import DataSource, DataSourceNotFoundException LOG = log.getLogger(__name__) @@ -31,15 +32,35 @@ def get_parser(parser=None): parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.description = __doc__ - parser.add_argument("-d", "--devpath", required=True, - metavar="PATH", - help="sysfs path to hotplugged device") - parser.add_argument("-s", "--subsystem", required=True, - help="subsystem to act on", - choices=['net']) - parser.add_argument("-u", "--udevaction", required=True, - help="action to take", - choices=['add', 'remove']) + parser.add_argument( + "-s", "--subsystem", required=True, + help="subsystem to act on", + choices=['net'] + ) + + subparsers = parser.add_subparsers( + title='Hotplug Action', + dest='hotplug_action' + ) + subparsers.required = True + + subparsers.add_parser( + 'query', + help='query if hotplug is enabled for given subsystem' + ) + + parser_handle = subparsers.add_parser( + 'handle', help='handle the hotplug event') + parser_handle.add_argument( + "-d", "--devpath", required=True, + metavar="PATH", + help="sysfs path to hotplugged device" + ) + parser_handle.add_argument( + "-u", "--udevaction", required=True, + help="action to take", + choices=['add', 'remove'] + ) return parser @@ -133,27 +154,42 @@ SUBSYSTEM_PROPERTES_MAP = { } -def handle_hotplug( - hotplug_init: Init, devpath, subsystem, udevaction 
-): - handler_cls, event_scope = SUBSYSTEM_PROPERTES_MAP.get( - subsystem, (None, None) - ) - if handler_cls is None: +def is_enabled(hotplug_init, subsystem): + try: + scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1] + except KeyError as e: raise Exception( 'hotplug-hook: cannot handle events for subsystem: {}'.format( - subsystem)) + subsystem) + ) from e + + return hotplug_init.update_event_enabled( + event_source_type=EventType.HOTPLUG, + scope=scope + ) + +def initialize_datasource(hotplug_init, subsystem): LOG.debug('Fetching datasource') datasource = hotplug_init.fetch(existing="trust") - if not hotplug_init.update_event_enabled( - event_source_type=EventType.HOTPLUG, - scope=EventScope.NETWORK - ): - LOG.debug('hotplug not enabled for event of type %s', event_scope) + if not datasource.get_supported_events([EventType.HOTPLUG]): + LOG.debug('hotplug not supported for event of type %s', subsystem) return + if not is_enabled(hotplug_init, subsystem): + LOG.debug('hotplug not enabled for event of type %s', subsystem) + return + return datasource + + +def handle_hotplug( + hotplug_init: Init, devpath, subsystem, udevaction +): + datasource = initialize_datasource(hotplug_init, subsystem) + if not datasource: + return + handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0] LOG.debug('Creating %s event handler', subsystem) event_handler = handler_cls( datasource=datasource, @@ -200,29 +236,36 @@ def handle_args(name, args): log.setupLogging(hotplug_init.cfg) if 'reporting' in hotplug_init.cfg: reporting.update_configuration(hotplug_init.cfg.get('reporting')) - # Logging isn't going to be setup until now LOG.debug( - '%s called with the following arguments: {udevaction: %s, ' - 'subsystem: %s, devpath: %s}', - name, args.udevaction, args.subsystem, args.devpath - ) - LOG.debug( - '%s called with the following arguments:\n' - 'udevaction: %s\n' - 'subsystem: %s\n' - 'devpath: %s', - name, args.udevaction, args.subsystem, args.devpath + '%s called with the following arguments: {' + 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}', + name, + args.hotplug_action, + args.subsystem, + args.udevaction if 'udevaction' in args else None, + args.devpath if 'devpath' in args else None, ) with hotplug_reporter: try: - handle_hotplug( - hotplug_init=hotplug_init, - devpath=args.devpath, - subsystem=args.subsystem, - udevaction=args.udevaction, - ) + if args.hotplug_action == 'query': + try: + datasource = initialize_datasource( + hotplug_init, args.subsystem) + except DataSourceNotFoundException: + print( + "Unable to determine hotplug state. 
No datasource " + "detected") + sys.exit(1) + print('enabled' if datasource else 'disabled') + else: + handle_hotplug( + hotplug_init=hotplug_init, + devpath=args.devpath, + subsystem=args.subsystem, + udevaction=args.udevaction, + ) except Exception: LOG.exception('Received fatal exception handling hotplug!') raise diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index bf6bf139..cc7e1c3c 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -679,6 +679,16 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): def get_package_mirror_info(self): return self.distro.get_package_mirror_info(data_source=self) + def get_supported_events(self, source_event_types: List[EventType]): + supported_events = {} # type: Dict[EventScope, set] + for event in source_event_types: + for update_scope, update_events in self.supported_update_events.items(): # noqa: E501 + if event in update_events: + if not supported_events.get(update_scope): + supported_events[update_scope] = set() + supported_events[update_scope].add(event) + return supported_events + def update_metadata_if_supported( self, source_event_types: List[EventType] ) -> bool: @@ -694,13 +704,7 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): @return True if the datasource did successfully update cached metadata due to source_event_type. """ - supported_events = {} # type: Dict[EventScope, set] - for event in source_event_types: - for update_scope, update_events in self.supported_update_events.items(): # noqa: E501 - if event in update_events: - if not supported_events.get(update_scope): - supported_events[update_scope] = set() - supported_events[update_scope].add(event) + supported_events = self.get_supported_events(source_event_types) for scope, matched_events in supported_events.items(): LOG.debug( "Update datasource metadata and %s config due to events: %s", diff --git a/tests/integration_tests/modules/test_hotplug.py b/tests/integration_tests/modules/test_hotplug.py index b683566f..a42d1c8c 100644 --- a/tests/integration_tests/modules/test_hotplug.py +++ b/tests/integration_tests/modules/test_hotplug.py @@ -48,7 +48,7 @@ def test_hotplug_add_remove(client: IntegrationInstance): # Add new NIC added_ip = client.instance.add_network_interface() - _wait_till_hotplug_complete(client) + _wait_till_hotplug_complete(client, expected_runs=2) ips_after_add = _get_ip_addr(client) new_addition = [ip for ip in ips_after_add if ip.ip4 == added_ip][0] @@ -63,7 +63,7 @@ def test_hotplug_add_remove(client: IntegrationInstance): # Remove new NIC client.instance.remove_network_interface(added_ip) - _wait_till_hotplug_complete(client, expected_runs=2) + _wait_till_hotplug_complete(client, expected_runs=4) ips_after_remove = _get_ip_addr(client) assert len(ips_after_remove) == len(ips_before) assert added_ip not in [ip.ip4 for ip in ips_after_remove] @@ -72,6 +72,10 @@ def test_hotplug_add_remove(client: IntegrationInstance): config = yaml.safe_load(netplan_cfg) assert new_addition.interface not in config['network']['ethernets'] + assert 'enabled' == client.execute( + 'cloud-init devel hotplug-hook -s net query' + ) + @pytest.mark.openstack def test_no_hotplug_in_userdata(client: IntegrationInstance): @@ -83,7 +87,7 @@ def test_no_hotplug_in_userdata(client: IntegrationInstance): client.instance.add_network_interface() _wait_till_hotplug_complete(client) log = client.read_from_file('/var/log/cloud-init.log') - assert 'hotplug not enabled for event of type network' in log + assert 
"Event Denied: scopes=['network'] EventType=hotplug" in log ips_after_add = _get_ip_addr(client) if len(ips_after_add) == len(ips_before) + 1: @@ -92,3 +96,7 @@ def test_no_hotplug_in_userdata(client: IntegrationInstance): assert new_ip.state == 'DOWN' else: assert len(ips_after_add) == len(ips_before) + + assert 'disabled' == client.execute( + 'cloud-init devel hotplug-hook -s net query' + ) diff --git a/tools/hook-hotplug b/tools/hook-hotplug index 34e95929..ced268b3 100755 --- a/tools/hook-hotplug +++ b/tools/hook-hotplug @@ -8,12 +8,17 @@ is_finished() { [ -e /run/cloud-init/result.json ] } -if is_finished; then +hotplug_enabled() { + [ "$(cloud-init devel hotplug-hook -s "${SUBSYSTEM}" query)" == "enabled" ] +} + +if is_finished && hotplug_enabled; then # open cloud-init's hotplug-hook fifo rw exec 3<>/run/cloud-init/hook-hotplug-cmd env_params=( - --devpath="${DEVPATH}" --subsystem="${SUBSYSTEM}" + handle + --devpath="${DEVPATH}" --udevaction="${ACTION}" ) # write params to cloud-init's hotplug-hook fifo -- cgit v1.2.3 From 776bd36385b3bd5c796479983afd2c9492cbdbe4 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 19 Aug 2021 16:23:31 -0500 Subject: Ignore hotplug socket when collecting logs (#985) Update "cloud-init collect-logs" to ignore /run/cloud-init/hook-hotplug-cmd as this will raise the error "/run/cloud-init/hook-hotplug-cmd` is a named pipe" if included. Also updated logs.py to continue writing the tarball if it fails collecting a file rather than let the exception bubble up. LP: #1940235 --- cloudinit/cmd/devel/logs.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index 51c61cca..31ade73d 100644 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -48,11 +48,15 @@ def get_parser(parser=None): return parser -def _copytree_ignore_sensitive_files(curdir, files): - """Return a list of files to ignore if we are non-root""" - if os.getuid() == 0: - return () - return (INSTANCE_JSON_SENSITIVE_FILE,) # Ignore root-permissioned files +def _copytree_rundir_ignore_files(curdir, files): + """Return a list of files to ignore for /run/cloud-init directory""" + ignored_files = [ + 'hook-hotplug-cmd', # named pipe for hotplug + ] + if os.getuid() != 0: + # Ignore root-permissioned files + ignored_files.append(INSTANCE_JSON_SENSITIVE_FILE) + return ignored_files def _write_command_output_to_file(cmd, filename, msg, verbosity): @@ -123,9 +127,13 @@ def collect_logs(tarfile, include_userdata, verbosity=0): run_dir = os.path.join(log_dir, 'run') ensure_dir(run_dir) if os.path.exists(CLOUDINIT_RUN_DIR): - shutil.copytree(CLOUDINIT_RUN_DIR, - os.path.join(run_dir, 'cloud-init'), - ignore=_copytree_ignore_sensitive_files) + try: + shutil.copytree(CLOUDINIT_RUN_DIR, + os.path.join(run_dir, 'cloud-init'), + ignore=_copytree_rundir_ignore_files) + except shutil.Error as e: + sys.stderr.write("Failed collecting file(s) due to error:\n") + sys.stderr.write(str(e) + '\n') _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) else: _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, -- cgit v1.2.3 From 7d3f5d750f6111c2716143364ea33486df67c927 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 20 Aug 2021 17:09:49 -0500 Subject: Fix home permissions modified by ssh module (SC-338) (#984) Fix home permissions modified by ssh module In #956, we updated the file and directory permissions for keys not in the user's home directory. 
We also unintentionally modified the permissions within the home directory as well. These should not change, and this commit changes that back. LP: #1940233 --- cloudinit/ssh_util.py | 35 +++++- .../integration_tests/modules/test_ssh_keysfile.py | 132 ++++++++++++++++++--- 2 files changed, 146 insertions(+), 21 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index b8a3c8f7..9ccadf09 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -321,23 +321,48 @@ def check_create_path(username, filename, strictmodes): home_folder = os.path.dirname(user_pwent.pw_dir) for directory in directories: parent_folder += "/" + directory - if home_folder.startswith(parent_folder): + + # security check, disallow symlinks in the AuthorizedKeysFile path. + if os.path.islink(parent_folder): + LOG.debug( + "Invalid directory. Symlink exists in path: %s", + parent_folder) + return False + + if os.path.isfile(parent_folder): + LOG.debug( + "Invalid directory. File exists in path: %s", + parent_folder) + return False + + if (home_folder.startswith(parent_folder) or + parent_folder == user_pwent.pw_dir): continue - if not os.path.isdir(parent_folder): + if not os.path.exists(parent_folder): # directory does not exist, and permission so far are good: # create the directory, and make it accessible by everyone # but owned by root, as it might be used by many users. with util.SeLinuxGuard(parent_folder): - os.makedirs(parent_folder, mode=0o755, exist_ok=True) - util.chownbyid(parent_folder, root_pwent.pw_uid, - root_pwent.pw_gid) + mode = 0o755 + uid = root_pwent.pw_uid + gid = root_pwent.pw_gid + if parent_folder.startswith(user_pwent.pw_dir): + mode = 0o700 + uid = user_pwent.pw_uid + gid = user_pwent.pw_gid + os.makedirs(parent_folder, mode=mode, exist_ok=True) + util.chownbyid(parent_folder, uid, gid) permissions = check_permissions(username, parent_folder, filename, False, strictmodes) if not permissions: return False + if os.path.islink(filename) or os.path.isdir(filename): + LOG.debug("%s is not a file!", filename) + return False + # check the file if not os.path.exists(filename): # if file does not exist: we need to create it, since the diff --git a/tests/integration_tests/modules/test_ssh_keysfile.py b/tests/integration_tests/modules/test_ssh_keysfile.py index f82d7649..3159feb9 100644 --- a/tests/integration_tests/modules/test_ssh_keysfile.py +++ b/tests/integration_tests/modules/test_ssh_keysfile.py @@ -10,10 +10,10 @@ TEST_USER1_KEYS = get_test_rsa_keypair('test1') TEST_USER2_KEYS = get_test_rsa_keypair('test2') TEST_DEFAULT_KEYS = get_test_rsa_keypair('test3') -USERDATA = """\ +_USERDATA = """\ #cloud-config bootcmd: - - sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile /etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' /etc/ssh/sshd_config + - {bootcmd} ssh_authorized_keys: - {default} users: @@ -24,27 +24,17 @@ users: - name: test_user2 ssh_authorized_keys: - {user2} -""".format( # noqa: E501 +""".format( + bootcmd='{bootcmd}', default=TEST_DEFAULT_KEYS.public_key, user1=TEST_USER1_KEYS.public_key, user2=TEST_USER2_KEYS.public_key, ) -@pytest.mark.ubuntu -@pytest.mark.user_data(USERDATA) -def test_authorized_keys(client: IntegrationInstance): - expected_keys = [ - ('test_user1', '/home/test_user1/.ssh/authorized_keys2', - TEST_USER1_KEYS), - ('test_user2', '/home/test_user2/.ssh/authorized_keys2', - TEST_USER2_KEYS), - ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', - TEST_DEFAULT_KEYS), - ('root', '/root/.ssh/authorized_keys2', 
TEST_DEFAULT_KEYS), - ] - +def common_verify(client, expected_keys): for user, filename, keys in expected_keys: + # Ensure key is in the key file contents = client.read_from_file(filename) if user in ['ubuntu', 'root']: # Our personal public key gets added by pycloudlib @@ -83,3 +73,113 @@ def test_authorized_keys(client: IntegrationInstance): look_for_keys=False, allow_agent=False, ) + + # Ensure we haven't messed with any /home permissions + # See LP: #1940233 + home_dir = '/home/{}'.format(user) + home_perms = '755' + if user == 'root': + home_dir = '/root' + home_perms = '700' + assert '{} {}'.format(user, home_perms) == client.execute( + 'stat -c "%U %a" {}'.format(home_dir) + ) + if client.execute("test -d {}/.ssh".format(home_dir)).ok: + assert '{} 700'.format(user) == client.execute( + 'stat -c "%U %a" {}/.ssh'.format(home_dir) + ) + assert '{} 600'.format(user) == client.execute( + 'stat -c "%U %a" {}'.format(filename) + ) + + # Also ensure ssh-keygen works as expected + client.execute('mkdir {}/.ssh'.format(home_dir)) + assert client.execute( + "ssh-keygen -b 2048 -t rsa -f {}/.ssh/id_rsa -q -N ''".format( + home_dir) + ).ok + assert client.execute('test -f {}/.ssh/id_rsa'.format(home_dir)) + assert client.execute('test -f {}/.ssh/id_rsa.pub'.format(home_dir)) + + assert 'root 755' == client.execute('stat -c "%U %a" /home') + + +DEFAULT_KEYS_USERDATA = _USERDATA.format(bootcmd='""') + + +@pytest.mark.ubuntu +@pytest.mark.user_data(DEFAULT_KEYS_USERDATA) +def test_authorized_keys_default(client: IntegrationInstance): + expected_keys = [ + ('test_user1', '/home/test_user1/.ssh/authorized_keys', + TEST_USER1_KEYS), + ('test_user2', '/home/test_user2/.ssh/authorized_keys', + TEST_USER2_KEYS), + ('ubuntu', '/home/ubuntu/.ssh/authorized_keys', + TEST_DEFAULT_KEYS), + ('root', '/root/.ssh/authorized_keys', TEST_DEFAULT_KEYS), + ] + common_verify(client, expected_keys) + + +AUTHORIZED_KEYS2_USERDATA = _USERDATA.format(bootcmd=( + "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " + "/etc/ssh/authorized_keys %h/.ssh/authorized_keys2;' " + "/etc/ssh/sshd_config")) + + +@pytest.mark.ubuntu +@pytest.mark.user_data(AUTHORIZED_KEYS2_USERDATA) +def test_authorized_keys2(client: IntegrationInstance): + expected_keys = [ + ('test_user1', '/home/test_user1/.ssh/authorized_keys2', + TEST_USER1_KEYS), + ('test_user2', '/home/test_user2/.ssh/authorized_keys2', + TEST_USER2_KEYS), + ('ubuntu', '/home/ubuntu/.ssh/authorized_keys2', + TEST_DEFAULT_KEYS), + ('root', '/root/.ssh/authorized_keys2', TEST_DEFAULT_KEYS), + ] + common_verify(client, expected_keys) + + +NESTED_KEYS_USERDATA = _USERDATA.format(bootcmd=( + "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " + "/etc/ssh/authorized_keys %h/foo/bar/ssh/keys;' " + "/etc/ssh/sshd_config")) + + +@pytest.mark.ubuntu +@pytest.mark.user_data(NESTED_KEYS_USERDATA) +def test_nested_keys(client: IntegrationInstance): + expected_keys = [ + ('test_user1', '/home/test_user1/foo/bar/ssh/keys', + TEST_USER1_KEYS), + ('test_user2', '/home/test_user2/foo/bar/ssh/keys', + TEST_USER2_KEYS), + ('ubuntu', '/home/ubuntu/foo/bar/ssh/keys', + TEST_DEFAULT_KEYS), + ('root', '/root/foo/bar/ssh/keys', TEST_DEFAULT_KEYS), + ] + common_verify(client, expected_keys) + + +EXTERNAL_KEYS_USERDATA = _USERDATA.format(bootcmd=( + "sed -i 's;#AuthorizedKeysFile.*;AuthorizedKeysFile " + "/etc/ssh/authorized_keys /etc/ssh/authorized_keys/%u/keys;' " + "/etc/ssh/sshd_config")) + + +@pytest.mark.ubuntu +@pytest.mark.user_data(EXTERNAL_KEYS_USERDATA) +def test_external_keys(client: 
IntegrationInstance): + expected_keys = [ + ('test_user1', '/etc/ssh/authorized_keys/test_user1/keys', + TEST_USER1_KEYS), + ('test_user2', '/etc/ssh/authorized_keys/test_user2/keys', + TEST_USER2_KEYS), + ('ubuntu', '/etc/ssh/authorized_keys/ubuntu/keys', + TEST_DEFAULT_KEYS), + ('root', '/etc/ssh/authorized_keys/root/keys', TEST_DEFAULT_KEYS), + ] + common_verify(client, expected_keys) -- cgit v1.2.3 From 3ec8ddde0d1d2fd8597f7d2915baa3e328552ab1 Mon Sep 17 00:00:00 2001 From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com> Date: Fri, 20 Aug 2021 15:53:18 -0700 Subject: Azure: During primary nic detection, check interface status continuously before rebinding again (#990) Add 10 second polling loop in wait_for_link_up after performing an unbind and re-bind of primary NIC in hv_netvsc driver. Also reduce cloud-init logging levels to debug for these operations. --- cloudinit/sources/DataSourceAzure.py | 38 ++++++++++++++------------- tests/unittests/test_datasource/test_azure.py | 20 ++++++++++---- 2 files changed, 35 insertions(+), 23 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index ba23139b..fddfe363 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -892,12 +892,12 @@ class DataSourceAzure(sources.DataSource): logger_func=LOG.info) return - LOG.info("Attempting to bring %s up", ifname) + LOG.debug("Attempting to bring %s up", ifname) attempts = 0 + LOG.info("Unbinding and binding the interface %s", ifname) while True: - LOG.info("Unbinding and binding the interface %s", ifname) devicename = net.read_sys_net(ifname, 'device/device_id').strip('{}') util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/unbind', @@ -912,26 +912,28 @@ class DataSourceAzure(sources.DataSource): report_diagnostic_event(msg, logger_func=LOG.info) return - sleep_duration = 1 - msg = ("Link is not up after %d attempts with %d seconds sleep " - "between attempts." % (attempts, sleep_duration)) - if attempts % 10 == 0: + msg = ("Link is not up after %d attempts to rebind" % attempts) report_diagnostic_event(msg, logger_func=LOG.info) - else: LOG.info(msg) - sleep(sleep_duration) - - # Since we just did a unbind and bind, check again after sleep - # but before doing unbind and bind again to avoid races where the - # link might take a slight delay after bind to be up. - if self.distro.networking.is_up(ifname): - msg = ("Link is up after checking after sleeping for %d secs" - " after %d attempts" % - (sleep_duration, attempts)) - report_diagnostic_event(msg, logger_func=LOG.info) - return + # It could take some time after rebind for the interface to be up. + # So poll for the status for some time before attempting to rebind + # again. 
+ sleep_duration = 0.5 + max_status_polls = 20 + LOG.debug("Polling %d seconds for primary NIC link up after " + "rebind.", sleep_duration * max_status_polls) + + for i in range(0, max_status_polls): + if self.distro.networking.is_up(ifname): + msg = ("After %d attempts to rebind, link is up after " + "polling the link status %d times" % (attempts, i)) + report_diagnostic_event(msg, logger_func=LOG.info) + LOG.debug(msg) + return + else: + sleep(sleep_duration) @azure_ds_telemetry_reporter def _create_report_ready_marker(self): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 03609c3d..851cf82e 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -2912,19 +2912,29 @@ class TestPreprovisioningHotAttachNics(CiTestCase): @mock.patch('cloudinit.net.read_sys_net') @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up') def test_wait_for_link_up_checks_link_after_sleep( - self, m_is_link_up, m_read_sys_net, m_writefile, m_is_up): + self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up): """Waiting for link to be up should return immediately if the link is already up.""" distro_cls = distros.fetch('ubuntu') distro = distro_cls('ubuntu', {}, self.paths) dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths) - m_is_link_up.return_value = False - m_is_up.return_value = True + m_try_set_link_up.return_value = False + + callcount = 0 + + def is_up_mock(key): + nonlocal callcount + if callcount == 0: + callcount += 1 + return False + return True + + m_is_up.side_effect = is_up_mock dsa.wait_for_link_up("eth0") - self.assertEqual(2, m_is_link_up.call_count) - self.assertEqual(1, m_is_up.call_count) + self.assertEqual(2, m_try_set_link_up.call_count) + self.assertEqual(2, m_is_up.call_count) @mock.patch(MOCKPATH + 'util.write_file') @mock.patch('cloudinit.net.read_sys_net') -- cgit v1.2.3 From 03ee10cd378773fab50eacf6fce3c55e8f828879 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Mon, 23 Aug 2021 12:43:37 -0500 Subject: Release 21.3 (#993) Bump the version in cloudinit/version.py to 21.3 and update ChangeLog. 
LP: #1940839 --- ChangeLog | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++ cloudinit/version.py | 2 +- 2 files changed, 102 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index 98528249..6de07ad3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,104 @@ +21.3 + - Azure: During primary nic detection, check interface status continuously + before rebinding again (#990) [aswinrajamannar] + - Fix home permissions modified by ssh module (SC-338) (#984) + (LP: #1940233) + - Add integration test for sensitive jinja substitution (#986) + - Ignore hotplug socket when collecting logs (#985) (LP: #1940235) + - testing: Add missing mocks to test_vmware.py (#982) + - add Zadara Edge Cloud Platform to the supported clouds list (#963) + [sarahwzadara] + - testing: skip upgrade tests on LXD VMs (#980) + - Only invoke hotplug socket when functionality is enabled (#952) + - Revert unnecesary lcase in ds-identify (#978) [Andrew Kutz] + - cc_resolv_conf: fix typos (#969) [Shreenidhi Shedi] + - Replace broken httpretty tests with mock (SC-324) (#973) + - Azure: Check if interface is up after sleep when trying to bring it up + (#972) [aswinrajamannar] + - Update dscheck_VMware's rpctool check (#970) [Shreenidhi Shedi] + - Azure: Logging the detected interfaces (#968) [Moustafa Moustafa] + - Change netifaces dependency to 0.10.4 (#965) [Andrew Kutz] + - Azure: Limit polling network metadata on connection errors (#961) + [aswinrajamannar] + - Update inconsistent indentation (#962) [Andrew Kutz] + - cc_puppet: support AIO installations and more (#960) [Gabriel Nagy] + - Add Puppet contributors to CLA signers (#964) [Noah Fontes] + - Datasource for VMware (#953) [Andrew Kutz] + - photon: refactor hostname handling and add networkd activator (#958) + [sshedi] + - Stop copying ssh system keys and check folder permissions (#956) + [Emanuele Giuseppe Esposito] + - testing: port remaining cloud tests to integration testing framework + (SC-191) (#955) + - generate contents for ovf-env.xml when provisioning via IMDS (#959) + [Anh Vo] + - Add support for EuroLinux 7 && EuroLinux 8 (#957) [Aleksander Baranowski] + - Implementing device_aliases as described in docs (#945) + [Mal Graty] (LP: #1867532) + - testing: fix test_ssh_import_id.py (#954) + - Add ability to manage fallback network config on PhotonOS (#941) [sshedi] + - Add VZLinux support (#951) [eb3095] + - VMware: add network-config support in ovf-env.xml (#947) [PengpengSun] + - Update pylint to v2.9.3 and fix the new issues it spots (#946) + [Paride Legovini] + - Azure: mount default provisioning iso before try device listing (#870) + [Anh Vo] + - Document known hotplug limitations (#950) + - Initial hotplug support (#936) + - Fix MIME policy failure on python version upgrade (#934) + - run-container: fixup the centos repos baseurls when using http_proxy + (#944) [Paride Legovini] + - tools: add support for building rpms on rocky linux (#940) + - ssh-util: allow cloudinit to merge all ssh keys into a custom user + file, defined in AuthorizedKeysFile (#937) [Emanuele Giuseppe Esposito] + (LP: #1911680) + - VMware: new "allow_raw_data" switch (#939) [xiaofengw-vmware] + - bump pycloudlib version (#935) + - add renanrodrigo as a contributor (#938) [Renan Rodrigo] + - testing: simplify test_upgrade.py (#932) + - freebsd/net_v1 format: read MTU from root (#930) [Gonéri Le Bouder] + - Add new network activators to bring up interfaces (#919) + - - Detect a Python version change and clear the cache (#857) + [Robert 
Schweikert] + - cloud_tests: fix the Impish release name (#931) [Paride Legovini] + - Removed distro specific network code from Photon (#929) [sshedi] + - Add support for VMware PhotonOS (#909) [sshedi] + - cloud_tests: add impish release definition (#927) [Paride Legovini] + - docs: fix stale links rename master branch to main (#926) + - Fix DNS in NetworkState (SC-133) (#923) + - tests: Add 'adhoc' mark for integration tests (#925) + - Fix the spelling of "DigitalOcean" (#924) [Mark Mercado] + - Small Doc Update for ReportEventStack and Test (#920) [Mike Russell] + - Replace deprecated collections.Iterable with abc replacement (#922) + (LP: #1932048) + - testing: OCI availability domain is now required (SC-59) (#910) + - add DragonFlyBSD support (#904) [Gonéri Le Bouder] + - Use instance-data-sensitive.json in jinja templates (SC-117) (#917) + (LP: #1931392) + - doc: Update NoCloud docs stating required files (#918) (LP: #1931577) + - build-on-netbsd: don't pin a specific py3 version (#913) + [Gonéri Le Bouder] + - - Create the log file with 640 permissions (#858) [Robert Schweikert] + - Allow braces to appear in dhclient output (#911) [eb3095] + - Docs: Replace all freenode references with libera (#912) + - openbsd/net: flush the route table on net restart (#908) + [Gonéri Le Bouder] + - Add Rocky Linux support to cloud-init (#906) [Louis Abel] + - Add "esposem" as contributor (#907) [Emanuele Giuseppe Esposito] + - Add integration test for #868 (#901) + - Added support for importing keys via primary/security mirror clauses + (#882) [Paul Goins] (LP: #1925395) + - [examples] config-user-groups expire in the future (#902) + [Geert Stappers] + - BSD: static network, set the mtu (#894) [Gonéri Le Bouder] + - Add integration test for lp-1920939 (#891) + - Fix unit tests breaking from new httpretty version (#903) + - Allow user control over update events (#834) + - Update test characters in substitution unit test (#893) + - cc_disk_setup.py: remove UDEVADM_CMD definition as not used (#886) + [dermotbradley] + - Add AlmaLinux OS support (#872) [Andrew Lukoshko] + 21.2 - Add \r\n check for SSH keys in Azure (#889) - Revert "Add support to resize rootfs if using LVM (#721)" (#887) diff --git a/cloudinit/version.py b/cloudinit/version.py index be47aff3..b798a6d7 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -4,7 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. -__VERSION__ = "21.2" +__VERSION__ = "21.3" _PACKAGED_VERSION = '@@PACKAGED_VERSION@@' FEATURES = [ -- cgit v1.2.3 From 28e56d993fc40feab139f149dacc10cae51a3fe0 Mon Sep 17 00:00:00 2001 From: aswinrajamannar <39812128+aswinrajamannar@users.noreply.github.com> Date: Tue, 24 Aug 2021 13:45:41 -0700 Subject: Azure: Retry dhcp on timeouts when polling reprovisiondata (#998) In the nic attach path, we skip doing dhcp since we already did it when bringing the interface up. However when polling for reprovisiondata, it is possible for the request to timeout due to platform issues. In those cases we still need to do dhcp and try again since we tear down the context. We can only skip the first dhcp attempt. 
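A minimal sketch of the retry shape this message describes — skip DHCP only on the first attempt, and re-acquire a lease on every later retry once a timeout has torn the ephemeral context down. This is an illustration only, not the actual DataSourceAzure code; poll_with_dhcp_retry and its injected callables are hypothetical names:

    # Hypothetical sketch (not the real cloud-init implementation); the
    # actual logic lives in Azure's reprovisiondata polling loop.
    def poll_with_dhcp_retry(fetch, bring_up_dhcp, tear_down_dhcp,
                             have_dhcp_ctx=True):
        """Retry fetch() until it succeeds, redoing DHCP on each retry."""
        while True:
            if not have_dhcp_ctx:
                bring_up_dhcp()  # re-acquire a lease before retrying
            try:
                return fetch()
            except TimeoutError:
                # The request timed out: tear down the ephemeral DHCP
                # context and force the next iteration to do DHCP again.
                tear_down_dhcp()
                have_dhcp_ctx = False

Written this way, a platform-side timeout costs at most one extra DHCP exchange per retry, and the loop can never get stuck holding a torn-down context.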
--- cloudinit/sources/DataSourceAzure.py | 4 ++++ tests/unittests/test_datasource/test_azure.py | 34 +++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index fddfe363..caffa944 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -1317,6 +1317,10 @@ class DataSourceAzure(sources.DataSource): except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() + + # Also reset this flag which determines if we should do dhcp + # during retries. + is_ephemeral_ctx_present = False finally: if nl_sock: nl_sock.close() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 851cf82e..a4296bf6 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -3055,6 +3055,40 @@ class TestPreprovisioningPollIMDS(CiTestCase): self.assertEqual(0, m_dhcp.call_count) self.assertEqual(0, m_media_switch.call_count) + @mock.patch('os.path.isfile') + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + def test_poll_imds_does_dhcp_on_retries_if_ctx_present( + self, m_ephemeral_dhcpv4, m_isfile, report_ready_func, m_request, + m_media_switch, m_dhcp, m_net): + """The poll_imds function should reuse the dhcp ctx if it is already + present. This happens when we wait for nic to be hot-attached before + polling for reprovisiondata. Note that if this ctx is set when + _poll_imds is called, then it is not expected to be waiting for + media_disconnect_connect either.""" + + tries = 0 + + def fake_timeout_once(**kwargs): + nonlocal tries + tries += 1 + if tries == 1: + raise requests.Timeout('Fake connection timeout') + return mock.MagicMock(status_code=200, text="good", content="good") + + m_request.side_effect = fake_timeout_once + report_file = self.tmp_path('report_marker', self.tmp) + m_isfile.return_value = True + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) + with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file),\ + mock.patch.object(dsa, '_ephemeral_dhcp_ctx') as m_dhcp_ctx: + m_dhcp_ctx.obtain_lease.return_value = "Dummy lease" + dsa._ephemeral_dhcp_ctx = m_dhcp_ctx + dsa._poll_imds() + self.assertEqual(1, m_dhcp_ctx.clean_network.call_count) + self.assertEqual(1, m_ephemeral_dhcpv4.call_count) + self.assertEqual(0, m_media_switch.call_count) + self.assertEqual(2, m_request.call_count) + def test_does_not_poll_imds_report_ready_when_marker_file_exists( self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net): """poll_imds should not call report ready when the reported ready -- cgit v1.2.3 From 7fc3f08ebc20eb496b3d318bb718ece569b10dba Mon Sep 17 00:00:00 2001 From: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com> Date: Tue, 31 Aug 2021 00:51:10 +0530 Subject: Add support to accept-ra in networkd renderer (#999) Also fix search path in networkd --- cloudinit/net/networkd.py | 6 ++- tests/unittests/test_net.py | 74 ++++++++++++++++++++++++++++++++++ tests/unittests/test_net_activators.py | 4 +- 3 files changed, 81 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/networkd.py b/cloudinit/net/networkd.py index a311572f..ee6fd2ad 100644 --- a/cloudinit/net/networkd.py +++ b/cloudinit/net/networkd.py @@ -160,6 +160,10 @@ class Renderer(renderer.Renderer): cfg.update_section(sec, 'DHCP', dhcp) + if (dhcp in ['ipv6', 'yes'] 
and + isinstance(iface.get('accept-ra', ''), bool)): + cfg.update_section(sec, 'IPv6AcceptRA', iface['accept-ra']) + # This is to accommodate extra keys present in VMware config def dhcp_domain(self, d, cfg): for item in ['dhcp4domain', 'dhcp6domain']: @@ -247,7 +251,7 @@ class Renderer(renderer.Renderer): def available(target=None): expected = ['ip', 'systemctl'] - search = ['/usr/bin', '/bin'] + search = ['/usr/sbin', '/bin'] for p in expected: if not subp.which(p, search=search, target=target): return False diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index fc77b11e..094450b4 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1205,6 +1205,13 @@ NETWORK_CONFIGS = { USERCTL=no """), }, + 'expected_networkd': textwrap.dedent("""\ + [Match] + Name=iface0 + [Network] + DHCP=ipv6 + IPv6AcceptRA=True + """).rstrip(' '), }, 'dhcpv6_reject_ra': { 'expected_eni': textwrap.dedent("""\ @@ -1260,6 +1267,13 @@ NETWORK_CONFIGS = { USERCTL=no """), }, + 'expected_networkd': textwrap.dedent("""\ + [Match] + Name=iface0 + [Network] + DHCP=ipv6 + IPv6AcceptRA=False + """).rstrip(' '), }, 'ipv6_slaac': { 'expected_eni': textwrap.dedent("""\ @@ -5203,6 +5217,66 @@ class TestNetworkdRoundTrip(CiTestCase): self.compare_dicts(actual, expected) + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def test_dhcpv6_accept_ra_config_v1(self, m_chown): + nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + + actual = files[nwk_fn].splitlines() + actual = self.create_conf_dict(actual) + + expected = entry['expected_networkd'].splitlines() + expected = self.create_conf_dict(expected) + + self.compare_dicts(actual, expected) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def test_dhcpv6_accept_ra_config_v2(self, m_chown): + nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' + entry = NETWORK_CONFIGS['dhcpv6_accept_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + + actual = files[nwk_fn].splitlines() + actual = self.create_conf_dict(actual) + + expected = entry['expected_networkd'].splitlines() + expected = self.create_conf_dict(expected) + + self.compare_dicts(actual, expected) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def test_dhcpv6_reject_ra_config_v1(self, m_chown): + nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v1'])) + + actual = files[nwk_fn].splitlines() + actual = self.create_conf_dict(actual) + + expected = entry['expected_networkd'].splitlines() + expected = self.create_conf_dict(expected) + + self.compare_dicts(actual, expected) + + @mock.patch("cloudinit.net.util.chownbyname", return_value=True) + def test_dhcpv6_reject_ra_config_v2(self, m_chown): + nwk_fn = '/etc/systemd/network/10-cloud-init-iface0.network' + entry = NETWORK_CONFIGS['dhcpv6_reject_ra'] + files = self._render_and_read(network_config=yaml.load( + entry['yaml_v2'])) + + actual = files[nwk_fn].splitlines() + actual = self.create_conf_dict(actual) + + expected = entry['expected_networkd'].splitlines() + expected = self.create_conf_dict(expected) + + self.compare_dicts(actual, expected) + class TestRenderersSelect: diff --git a/tests/unittests/test_net_activators.py b/tests/unittests/test_net_activators.py index 
38f2edf2..f63a8b74 100644 --- a/tests/unittests/test_net_activators.py +++ b/tests/unittests/test_net_activators.py @@ -118,8 +118,8 @@ NETWORK_MANAGER_AVAILABLE_CALLS = [ ] NETWORKD_AVAILABLE_CALLS = [ - (('ip',), {'search': ['/usr/bin', '/bin'], 'target': None}), - (('systemctl',), {'search': ['/usr/bin', '/bin'], 'target': None}), + (('ip',), {'search': ['/usr/sbin', '/bin'], 'target': None}), + (('systemctl',), {'search': ['/usr/sbin', '/bin'], 'target': None}), ] -- cgit v1.2.3 From 58c2de4c97de6cfa6edbf5319641f2ef71284895 Mon Sep 17 00:00:00 2001 From: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com> Date: Wed, 1 Sep 2021 19:53:55 +0530 Subject: Fix `make style-check` errors (#1000) Using flake8 in place of pyflakes Renamed run-pyflakes -> run-flake8 Changed target name to flake8 in Makefile With pyflakes we can't suppress warnings/errors in a few required places. flake8 is flexible in that regard. Hence using flake8 seems to be a better choice here. flake8 does the job of pep8 anyway. So, removed pep8 target from Makefile along with tools/run-pep8 script. Included setup.py in flake8 checks --- Makefile | 11 ++++------- cloudinit/cmd/devel/hotplug_hook.py | 3 ++- cloudinit/distros/__init__.py | 2 +- cloudinit/sources/__init__.py | 2 +- cloudinit/stages.py | 2 +- setup.py | 8 ++++++-- tests/integration_tests/clouds.py | 2 +- tests/integration_tests/instances.py | 4 +++- tools/run-flake8 | 17 +++++++++++++++++ tools/run-pep8 | 21 --------------------- tools/run-pyflakes | 17 ----------------- tox.ini | 4 ++-- 12 files changed, 38 insertions(+), 55 deletions(-) create mode 100755 tools/run-flake8 delete mode 100755 tools/run-pep8 delete mode 100755 tools/run-pyflakes (limited to 'cloudinit') diff --git a/Makefile b/Makefile index 5fb0fcbf..0c015dae 100644 --- a/Makefile +++ b/Makefile @@ -18,13 +18,10 @@ all: check check: check_version test yaml -style-check: pep8 $(pyflakes) +style-check: flake8 -pep8: - @$(CWD)/tools/run-pep8 - -pyflakes: - @$(CWD)/tools/run-pyflakes +flake8: + @$(CWD)/tools/run-flake8 unittest: clean_pyc python3 -m pytest -v tests/unittests cloudinit @@ -86,6 +83,6 @@ deb-src: doc: tox -e doc -.PHONY: test pyflakes clean pep8 rpm srpm deb deb-src yaml +.PHONY: test flake8 clean rpm srpm deb deb-src yaml .PHONY: check_version pip-test-requirements pip-requirements clean_pyc .PHONY: unittest style-check doc diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py index a0058f03..d4f0547e 100644 --- a/cloudinit/cmd/devel/hotplug_hook.py +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -13,7 +13,8 @@ from cloudinit.net import activators, read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data from cloudinit.reporting import events from cloudinit.stages import Init -from cloudinit.sources import DataSource, DataSourceNotFoundException +from cloudinit.sources import DataSource # noqa: F401 +from cloudinit.sources import DataSourceNotFoundException LOG = log.getLogger(__name__) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index a634623a..2e629143 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -16,7 +16,7 @@ import stat import string import urllib.parse from io import StringIO -from typing import Any, Mapping +from typing import Any, Mapping # noqa: F401 from cloudinit import importer from cloudinit import log as logging diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index cc7e1c3c..54b8240a 100644 ---
a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -13,7 +13,7 @@ import copy import json import os from collections import namedtuple -from typing import Dict, List +from typing import Dict, List # noqa: F401 from cloudinit import dmi from cloudinit import importer diff --git a/cloudinit/stages.py b/cloudinit/stages.py index bc164fa0..80aa9f5e 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -9,7 +9,7 @@ import os import pickle import sys from collections import namedtuple -from typing import Dict, Set +from typing import Dict, Set # noqa: F401 from cloudinit.settings import ( FREQUENCIES, CLOUD_CONFIG, PER_INSTANCE, PER_ONCE, RUN_CLOUD_CONFIG) diff --git a/setup.py b/setup.py index 7fa03e63..100575ff 100755 --- a/setup.py +++ b/setup.py @@ -28,9 +28,11 @@ import subprocess RENDERED_TMPD_PREFIX = "RENDERED_TEMPD" VARIANT = None + def is_f(p): return os.path.isfile(p) + def is_generator(p): return '-generator' in p @@ -111,6 +113,7 @@ def render_tmpl(template, mode=None): # return path relative to setup.py return os.path.join(os.path.basename(tmpd), bname) + # User can set the variant for template rendering if '--distro' in sys.argv: idx = sys.argv.index('--distro') @@ -166,7 +169,7 @@ elif os.path.isfile('/etc/system-release-cpe'): with open('/etc/system-release-cpe') as f: cpe_data = f.read().rstrip().split(':') - if cpe_data[1] == "\o": + if cpe_data[1] == "\o": # noqa: W605 # URI formated CPE inc = 0 else: @@ -216,7 +219,8 @@ class InitsysInstallData(install): if self.init_system and isinstance(self.init_system, str): self.init_system = self.init_system.split(",") - if len(self.init_system) == 0 and not platform.system().endswith('BSD'): + if (len(self.init_system) == 0 and + not platform.system().endswith('BSD')): self.init_system = ['systemd'] bad = [f for f in self.init_system if f not in INITSYS_TYPES] diff --git a/tests/integration_tests/clouds.py b/tests/integration_tests/clouds.py index f2362b5d..32fdc91e 100644 --- a/tests/integration_tests/clouds.py +++ b/tests/integration_tests/clouds.py @@ -28,7 +28,7 @@ from tests.integration_tests.instances import ( from tests.integration_tests.util import emit_dots_on_travis try: - from typing import Optional + from typing import Optional # noqa: F401 except ImportError: pass diff --git a/tests/integration_tests/instances.py b/tests/integration_tests/instances.py index 055ec758..63e0e630 100644 --- a/tests/integration_tests/instances.py +++ b/tests/integration_tests/instances.py @@ -13,7 +13,9 @@ from tests.integration_tests import integration_settings try: from typing import TYPE_CHECKING if TYPE_CHECKING: - from tests.integration_tests.clouds import IntegrationCloud + from tests.integration_tests.clouds import ( # noqa: F401 + IntegrationCloud + ) except ImportError: pass diff --git a/tools/run-flake8 b/tools/run-flake8 new file mode 100755 index 00000000..0021cdb9 --- /dev/null +++ b/tools/run-flake8 @@ -0,0 +1,17 @@ +#!/bin/bash + +CR=" +" +pycheck_dirs=( "cloudinit/" "tests/" "tools/" "setup.py" ) + +set -f +if [ $# -eq 0 ]; then + files=( "${pycheck_dirs[@]}" ) +else + files=( "$@" ) +fi + +cmd=( "python3" -m "flake8" "${files[@]}" ) + +echo "Running: " "${cmd[@]}" 1>&2 +exec "${cmd[@]}" diff --git a/tools/run-pep8 b/tools/run-pep8 deleted file mode 100755 index 4bd0bbfb..00000000 --- a/tools/run-pep8 +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -pycheck_dirs=( "cloudinit/" "tests/" "tools/" ) - -CR=" -" -[ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose="" - -set -f -if [ $# -eq 0 ]; then 
unset IFS - IFS="$CR" - files=( "${bin_files[@]}" "${pycheck_dirs[@]}" ) - unset IFS -else - files=( "$@" ) -fi - -myname=${0##*/} -cmd=( "${myname#run-}" $verbose "${files[@]}" ) -echo "Running: " "${cmd[@]}" 1>&2 -exec "${cmd[@]}" diff --git a/tools/run-pyflakes b/tools/run-pyflakes deleted file mode 100755 index 179afebe..00000000 --- a/tools/run-pyflakes +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -CR=" -" -pycheck_dirs=( "cloudinit/" "tests/" "tools/" ) - -set -f -if [ $# -eq 0 ]; then - files=( "${pycheck_dirs[@]}" ) -else - files=( "$@" ) -fi - -cmd=( "python3" -m "pyflakes" "${files[@]}" ) - -echo "Running: " "${cmd[@]}" 1>&2 -exec "${cmd[@]}" diff --git a/tox.ini b/tox.ini index 27c16ef3..aad286ff 100644 --- a/tox.ini +++ b/tox.ini @@ -13,7 +13,7 @@ passenv= basepython = python3 deps = flake8==3.8.2 -commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/} +commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py} # https://github.com/gabrielfalcao/HTTPretty/issues/223 setenv = @@ -119,7 +119,7 @@ deps = pytest==3.0.7 [testenv:tip-flake8] -commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/} +commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py} deps = flake8 [testenv:tip-pylint] -- cgit v1.2.3 From db72c841c2ec0d94d366df7fa623e82e91e2201c Mon Sep 17 00:00:00 2001 From: Andrew Bogott Date: Wed, 1 Sep 2021 09:50:37 -0500 Subject: puppet config: add the start_agent option (#1002) The current code starts the puppet agent and also sets autostart in all cases. This conflicts with a common pattern where puppet itself manages the agent and autostart state. For example, in my deploy puppet disables the puppet agent and replaces it with a cron. This causes various races both within this cloud-init unit and within puppet itself while cloud-init and puppet fight over whether or not to enable the service. --- cloudinit/config/cc_puppet.py | 23 +++++++++++++++------- .../unittests/test_handler/test_handler_puppet.py | 22 +++++++++++++++++++++ 2 files changed, 38 insertions(+), 7 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index a0779eb0..dc20fc44 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -59,10 +59,13 @@ Additionally it's possible to create a ``csr_attributes.yaml`` file for CSR attributes and certificate extension requests. See https://puppet.com/docs/puppet/latest/config_file_csr_attributes.html -The puppet service will be automatically enabled after installation. A manual -run can also be triggered by setting ``exec`` to ``true``, and additional -arguments can be passed to ``puppet agent`` via the ``exec_args`` key (by -default the agent will execute with the ``--test`` flag). +By default, the puppet service will be automatically enabled after installation +and set to automatically start on boot. To override this in favor of manual +puppet execution set ``start_service`` to ``false``. + +A single manual run can be triggered by setting ``exec`` to ``true``, and +additional arguments can be passed to ``puppet agent`` via the ``exec_args`` +key (by default the agent will execute with the ``--test`` flag). **Internal name:** ``cc_puppet`` @@ -85,6 +88,7 @@ default the agent will execute with the ``--test`` flag). 
package_name: 'puppet' exec: exec_args: ['--test'] + start_service: conf: agent: server: "puppetserver.example.org" @@ -197,6 +201,9 @@ def handle(name, cfg, cloud, log, _args): puppet_cfg, 'install_type', 'packages') cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True) run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False) + start_puppetd = util.get_cfg_option_bool(puppet_cfg, + 'start_service', + default=True) aio_install_url = util.get_cfg_option_str( puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL) @@ -291,7 +298,8 @@ def handle(name, cfg, cloud, log, _args): default_flow_style=False)) # Set it up so it autostarts - _autostart_puppet(log) + if start_puppetd: + _autostart_puppet(log) # Run the agent if needed if run: @@ -312,7 +320,8 @@ def handle(name, cfg, cloud, log, _args): cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) subp.subp(cmd, capture=False) - # Start puppetd - subp.subp(['service', 'puppet', 'start'], capture=False) + if start_puppetd: + # Start puppetd + subp.subp(['service', 'puppet', 'start'], capture=False) # vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py index b7891ab4..19f72a0c 100644 --- a/tests/unittests/test_handler/test_handler_puppet.py +++ b/tests/unittests/test_handler/test_handler_puppet.py @@ -277,6 +277,28 @@ class TestPuppetHandle(CiTestCase): [mock.call(['puppet', 'agent', '--test'], capture=False)], m_subp.call_args_list) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_starts_puppetd(self, m_subp, m_auto): + """Run puppet with default args if 'exec' is set to True.""" + mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + self.assertEqual(1, m_auto.call_count) + self.assertIn( + [mock.call(['service', 'puppet', 'start'], capture=False)], + m_subp.call_args_list) + + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) + def test_puppet_skips_puppetd(self, m_subp, m_auto): + """Run puppet with default args if 'exec' is set to True.""" + mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'start_service': False}} + cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + self.assertEqual(0, m_auto.call_count) + self.assertNotIn( + [mock.call(['service', 'puppet', 'start'], capture=False)], + m_subp.call_args_list) + @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) def test_puppet_runs_puppet_with_args_list_if_requested(self, m_subp, m_auto): -- cgit v1.2.3 From f6c71fd758a3589ac5f62fd251d60b00edf5bb1c Mon Sep 17 00:00:00 2001 From: Alexandr Kravchenko Date: Wed, 1 Sep 2021 22:01:09 +0300 Subject: Add CloudLinux OS support (#1003) https://www.cloudlinux.com/ --- README.md | 2 +- cloudinit/config/cc_ntp.py | 6 +++--- cloudinit/config/cc_yum_add_repo.py | 8 ++++---- cloudinit/distros/__init__.py | 4 ++-- cloudinit/distros/cloudlinux.py | 9 +++++++++ cloudinit/net/sysconfig.py | 4 ++-- cloudinit/tests/test_util.py | 33 ++++++++++++++++++++++++++++++++- cloudinit/util.py | 5 +++-- config/cloud.cfg.tmpl | 6 +++--- systemd/cloud-init-generator.tmpl | 2 +- systemd/cloud-init.service.tmpl | 2 +- tests/unittests/test_cli.py | 6 +++--- tools/.github-cla-signers | 1 + tools/render-cloudcfg | 2 +- 14 files changed, 66 insertions(+), 24 deletions(-) create mode 100644 cloudinit/distros/cloudlinux.py (limited to 'cloudinit') diff --git a/README.md b/README.md index 5828c2fa..b705a065 100644 --- 
a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- |
-| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux<br />SLES/openSUSE<br />Ubuntu<br /><br /><br /><br /><br /><br /><br /><br /><br /><br />| Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox<br />| Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware<br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br /><br />|
## To start developing cloud-init diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 7c371a49..e2231cbb 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -24,9 +24,9 @@ LOG = logging.getLogger(__name__) frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 -distros = ['almalinux', 'alpine', 'centos', 'debian', 'eurolinux', 'fedora', - 'opensuse', 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', - 'virtuozzo'] +distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian', + 'eurolinux', 'fedora', 'opensuse', 'photon', 'rhel', 'rocky', + 'sles', 'ubuntu', 'virtuozzo'] NTP_CLIENT_CONFIG = { 'chrony': { diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index b7a48dcc..899cb082 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -18,8 +18,8 @@ entry, the config entry will be skipped. **Module frequency:** per always -**Supported distros:** almalinux, centos, eurolinux, fedora, photon, rhel, - rocky, virtuozzo +**Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora, + photon, rhel, rocky, virtuozzo **Config keys**:: @@ -37,8 +37,8 @@ from configparser import ConfigParser from cloudinit import util -distros = ['almalinux', 'centos', 'eurolinux', 'fedora', 'photon', 'rhel', - 'rocky', 'virtuozzo'] +distros = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora', + 'photon', 'rhel', 'rocky', 'virtuozzo'] def _canonicalize_id(repo_id): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2e629143..a0526948 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -49,8 +49,8 @@ OSFAMILIES = { 'debian': ['debian', 'ubuntu'], 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], - 'redhat': ['almalinux', 'amazon', 'centos', 'eurolinux', 'fedora', - 'photon', 'rhel', 'rocky', 'virtuozzo'], + 'redhat': ['almalinux', 'amazon', 'centos', 'cloudlinux', 'eurolinux', + 'fedora', 'photon', 'rhel', 'rocky', 'virtuozzo'], 'suse': ['opensuse', 'sles'], } diff --git a/cloudinit/distros/cloudlinux.py b/cloudinit/distros/cloudlinux.py new file mode 100644 index 00000000..edb3165d --- /dev/null +++ b/cloudinit/distros/cloudlinux.py @@ -0,0 +1,9 @@ +# This file is part of cloud-init. See LICENSE file for license information.
+ +from cloudinit.distros import rhel + + +class Distro(rhel.Distro): + pass + +# vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 06f7255e..7b8e4da7 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -18,8 +18,8 @@ from .network_state import ( is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) LOG = logging.getLogger(__name__) -KNOWN_DISTROS = ['almalinux', 'centos', 'eurolinux', 'fedora', 'rhel', 'rocky', - 'suse', 'virtuozzo'] +KNOWN_DISTROS = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora', + 'rhel', 'rocky', 'suse', 'virtuozzo'] NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index 9dd01158..f11cfb27 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -186,6 +186,20 @@ OS_RELEASE_VIRTUOZZO_8 = dedent("""\ BUG_REPORT_URL="https://bugs.openvz.org" """) +OS_RELEASE_CLOUDLINUX_8 = dedent("""\ + NAME="CloudLinux" + VERSION="8.4 (Valery Rozhdestvensky)" + ID="cloudlinux" + ID_LIKE="rhel fedora centos" + VERSION_ID="8.4" + PLATFORM_ID="platform:el8" + PRETTY_NAME="CloudLinux 8.4 (Valery Rozhdestvensky)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:cloudlinux:cloudlinux:8.4:GA:server" + HOME_URL="https://www.cloudlinux.com/" + BUG_REPORT_URL="https://www.cloudlinux.com/support" +""") + REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)" REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)" REDHAT_RELEASE_REDHAT_6 = ( @@ -200,7 +214,8 @@ REDHAT_RELEASE_ROCKY_8 = ( "Rocky Linux release 8.3 (Green Obsidian)") REDHAT_RELEASE_VIRTUOZZO_8 = ( "Virtuozzo Linux release 8") - +REDHAT_RELEASE_CLOUDLINUX_8 = ( + "CloudLinux release 8.4 (Valery Rozhdestvensky)") OS_RELEASE_DEBIAN = dedent("""\ PRETTY_NAME="Debian GNU/Linux 9 (stretch)" NAME="Debian GNU/Linux" @@ -679,6 +694,22 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): + """Verify cloudlinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): + """Verify cloudlinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_debian(self, m_os_release, m_path_exists): """Verify we get the correct name and release name on Debian.""" diff --git a/cloudinit/util.py b/cloudinit/util.py index c53f6453..894245bf 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -548,8 +548,9 @@ def system_info(): if system == "linux": linux_dist = info['dist'][0].lower() if linux_dist in ( - 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'eurolinux', - 'fedora', 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'): + 'almalinux', 'alpine', 'arch', 'centos', 'cloudlinux', + 'debian', 'eurolinux', 'fedora', 'photon', 'rhel', 'rocky', + 'suse', 'virtuozzo'): var = linux_dist 
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 825deff4..e5fbc10b 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -32,7 +32,7 @@ disable_root: false disable_root: true {% endif %} -{% if variant in ["almalinux", "alpine", "amazon", "centos", "eurolinux", +{% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux", "fedora", "rhel", "rocky", "virtuozzo"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} @@ -173,7 +173,7 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian", +{% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian", "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %} distro: {{ variant }} @@ -228,7 +228,7 @@ system_info: primary: http://ports.ubuntu.com/ubuntu-ports security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh -{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "eurolinux", +{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "eurolinux", "fedora", "rhel", "rocky", "suse", "virtuozzo"] %} # Default user name + that default users groups (if added/used) default_user: diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 3dbe5947..7fe009ec 100644 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -83,7 +83,7 @@ default() { check_for_datasource() { local ds_rc="" -{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel", +{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", "rhel", "rocky", "virtuozzo"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index 636f59be..6c3a8f20 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -12,7 +12,7 @@ After=systemd-networkd-wait-online.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=networking.service {% endif %} -{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel", +{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", "rhel", "rocky", "virtuozzo"] %} After=network.service After=NetworkManager.service diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index a39e1d0c..b8ae9e47 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -224,9 +224,9 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all']) expected_doc_sections = [ '**Supported distros:** all', - ('**Supported distros:** almalinux, alpine, centos, debian, ' - 'eurolinux, fedora, opensuse, photon, rhel, rocky, sles, ubuntu, ' - 'virtuozzo'), + ('**Supported distros:** almalinux, alpine, centos, cloudlinux, ' + 'debian, eurolinux, fedora, opensuse, photon, rhel, rocky, ' + 'sles, ubuntu, virtuozzo'), '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' ] diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 4fa108aa..ba0c6d79 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -28,6 
+28,7 @@ hamalq impl irishgordo izzyleung +JohnKepplers johnsonshi jordimassaguerpla jqueuniet diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 30f82521..78a48c30 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -4,7 +4,7 @@ import argparse import os import sys -VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian", +VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian", "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", "suse","rocky", "ubuntu", "unknown", "virtuozzo"] -- cgit v1.2.3 From 7fe0f900adcd45a74a24b7f9b251e2ee35b53b54 Mon Sep 17 00:00:00 2001 From: Andy Fiddaman Date: Thu, 2 Sep 2021 14:26:28 +0000 Subject: cc_update_etc_hosts: Use the distribution-defined path for the hosts file (#983) The distribution class has a field that specifies the location of the system hosts file and this can be overridden in subclasses. While the field is correctly used in distro.update_etc_hosts(), the update_etc_hosts module does not use it and just assumes '/etc/hosts' This fixes the module to use the distribution-specific variable. --- cloudinit/config/cc_update_etc_hosts.py | 32 ++++++++++++++++++-------------- tools/.github-cla-signers | 1 + 2 files changed, 19 insertions(+), 14 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 03fffb96..3a78fccc 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -9,27 +9,28 @@ """ Update Etc Hosts ---------------- -**Summary:** update ``/etc/hosts`` +**Summary:** update the hosts file (usually ``/etc/hosts``) -This module will update the contents of ``/etc/hosts`` based on the -hostname/fqdn specified in config. Management of ``/etc/hosts`` is controlled -using ``manage_etc_hosts``. If this is set to false, cloud-init will not manage -``/etc/hosts`` at all. This is the default behavior. +This module will update the contents of the local hosts database (hosts file; +usually ``/etc/hosts``) based on the hostname/fqdn specified in config. +Management of the hosts file is controlled using ``manage_etc_hosts``. If this +is set to false, cloud-init will not manage the hosts file at all. This is the +default behavior. -If set to ``true`` or ``template``, cloud-init will generate ``/etc/hosts`` +If set to ``true`` or ``template``, cloud-init will generate the hosts file using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the ``/etc/cloud/templates/hosts.tmpl`` template, the strings ``$hostname`` and ``$fqdn`` will be replaced with the hostname and fqdn respectively. If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will not -rewrite ``/etc/hosts`` entirely, but rather will ensure that a entry for the -fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e. -``ping `` will ping ``127.0.0.1`` or ``127.0.1.1`` or other ip). +rewrite the hosts file entirely, but rather will ensure that a entry for the +fqdn with a distribution dependent ip is present (i.e. ``ping `` will +ping ``127.0.0.1`` or ``127.0.1.1`` or other ip). .. note:: if ``manage_etc_hosts`` is set ``true`` or ``template``, the contents - of ``/etc/hosts`` will be updated every boot. to make any changes to - ``/etc/hosts`` persistant they must be made in + of the hosts file will be updated every boot. To make any changes to + the hosts file persistent they must be made in ``/etc/cloud/templates/hosts.tmpl`` .. 
note::
@@ -59,6 +60,9 @@ frequency = PER_ALWAYS

 def handle(name, cfg, cloud, log, _args):
     manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
+
+    hosts_fn = cloud.distro.hosts_fn
+
     if util.translate_bool(manage_hosts, addons=['template']):
         (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
         if not hostname:
@@ -74,7 +78,7 @@ def handle(name, cfg, cloud, log, _args):
                        " found for distro %s") %
                       (cloud.distro.osfamily))

-        templater.render_to_file(tpl_fn_name, '/etc/hosts',
+        templater.render_to_file(tpl_fn_name, hosts_fn,
                                 {'hostname': hostname,
                                  'fqdn': fqdn})
     elif manage_hosts == "localhost":
@@ -84,10 +88,10 @@ def handle(name, cfg, cloud, log, _args):
                     " but no hostname was found"))
             return

-        log.debug("Managing localhost in /etc/hosts")
+        log.debug("Managing localhost in %s", hosts_fn)
         cloud.distro.update_etc_hosts(hostname, fqdn)
     else:
         log.debug(("Configuration option 'manage_etc_hosts' is not set,"
-                   " not managing /etc/hosts in module %s"), name)
+                   " not managing %s in module %s"), hosts_fn, name)

 # vi: ts=4 expandtab
diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers
index ba0c6d79..d1939718 100644
--- a/tools/.github-cla-signers
+++ b/tools/.github-cla-signers
@@ -14,6 +14,7 @@ bmhughes
 candlerb
 cawamata
 ciprianbadescu
+citrus-it
 dankenigsberg
 ddymko
 dermotbradley
--
cgit v1.2.3


From e69a88745e37061e0ab0a1e67ad11015cca610c1 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Fri, 3 Sep 2021 12:57:20 -0500
Subject: Set Azure to only update metadata on BOOT_NEW_INSTANCE (#1006)

In #834, we refactored the handling of events for fetching new
metadata. Previously, in Azure's __init__, the BOOT event was added to
the update_events, so it was assumed that Azure required the standard
BOOT behavior, which is to apply metadata twice every boot: once during
local-init, then again during the standard init phase.
https://github.com/canonical/cloud-init/blob/21.2/cloudinit/sources/DataSourceAzure.py#L356

However, this line was effectively meaningless. After the metadata was
fetched in local-init, it was then pickled out to disk. Because
"update_events" was a class variable, the EventType.BOOT was not
persisted into the pickle. When the pickle was then unpickled in the
init phase, metadata did not get re-fetched because EventType.BOOT was
not present, so Azure is effectively only BOOT_NEW_INSTANCE.

Fetching metadata twice during boot causes some issues for
pre-provisioning on Azure because updating metadata during
re-provisioning will cause cloud-init to poll for reprovisiondata again
in DataSourceAzure, which will infinitely return 404 (reprovisiondata
is deleted from IMDS after the health signal was sent by cloud-init
during init-local).
This makes cloud-init stuck in 'init' --- cloudinit/sources/DataSourceAzure.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index caffa944..3fb564c8 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,7 +22,7 @@ import requests from cloudinit import dmi from cloudinit import log as logging from cloudinit import net -from cloudinit.event import EventScope, EventType +from cloudinit.event import EventType from cloudinit.net import device_driver from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources @@ -339,13 +339,6 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzure(sources.DataSource): dsname = 'Azure' - # Regenerate network config new_instance boot and every boot - default_update_events = {EventScope.NETWORK: { - EventType.BOOT_NEW_INSTANCE, - EventType.BOOT, - EventType.BOOT_LEGACY - }} - _negotiated = False _metadata_imds = sources.UNSET _ci_pkl_version = 1 -- cgit v1.2.3 From 2ce857248162957a785af61c135ca8433fdbbcde Mon Sep 17 00:00:00 2001 From: Emanuele Giuseppe Esposito Date: Wed, 8 Sep 2021 02:08:36 +0200 Subject: ssh_utils.py: ignore when sshd_config options are not key/value pairs (#1007) As specified in #LP 1845552, In cloudinit/ssh_util.py, in parse_ssh_config_lines(), we attempt to parse each line of sshd_config. This function expects each line to be one of the following forms: \# comment key value key=value However, options like DenyGroups and DenyUsers are specified to *optionally* accepts values in sshd_config. Cloud-init should comply to this and skip the option if a value is not provided. Signed-off-by: Emanuele Giuseppe Esposito --- cloudinit/ssh_util.py | 8 +++++++- tests/unittests/test_sshutil.py | 8 ++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 9ccadf09..33679dcc 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -484,7 +484,13 @@ def parse_ssh_config_lines(lines): try: key, val = line.split(None, 1) except ValueError: - key, val = line.split('=', 1) + try: + key, val = line.split('=', 1) + except ValueError: + LOG.debug( + "sshd_config: option \"%s\" has no key/value pair," + " skipping it", line) + continue ret.append(SshdConfigLine(line, key, val)) return ret diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index a66788bf..08e20050 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -525,6 +525,14 @@ class TestUpdateSshConfigLines(test_helpers.CiTestCase): self.assertEqual([self.pwauth], result) self.check_line(lines[-1], self.pwauth, "no") + def test_option_without_value(self): + """Implementation only accepts key-value pairs.""" + extended_exlines = self.exlines.copy() + denyusers_opt = "DenyUsers" + extended_exlines.append(denyusers_opt) + lines = ssh_util.parse_ssh_config_lines(list(extended_exlines)) + self.assertNotIn(denyusers_opt, str(lines)) + def test_single_option_updated(self): """A single update should have change made and line updated.""" opt, val = ("UsePAM", "no") -- cgit v1.2.3 From f4c47e3e25d1fb79e2673e37f8fc67750d025be2 Mon Sep 17 00:00:00 2001 From: zhuzaifangxuele <52022596+zhuzaifangxuele@users.noreply.github.com> Date: Tue, 14 Sep 2021 02:21:28 +0800 Subject: Support openEuler OS (#1012) openEuler Homepage: 
https://www.openeuler.org/en/ --- README.md | 2 +- cloudinit/config/cc_ntp.py | 4 ++-- cloudinit/config/cc_yum_add_repo.py | 4 ++-- cloudinit/distros/__init__.py | 2 +- cloudinit/distros/openEuler.py | 9 +++++++++ cloudinit/net/sysconfig.py | 2 +- cloudinit/tests/test_util.py | 17 +++++++++++++++++ cloudinit/util.py | 4 ++-- config/cloud.cfg.tmpl | 6 +++--- systemd/cloud-init-generator.tmpl | 4 ++-- systemd/cloud-init.service.tmpl | 4 ++-- tests/unittests/test_cli.py | 4 ++-- tools/.github-cla-signers | 1 + tools/render-cloudcfg | 2 +- 14 files changed, 46 insertions(+), 19 deletions(-) create mode 100644 cloudinit/distros/openEuler.py (limited to 'cloudinit') diff --git a/README.md b/README.md index b705a065..e96541ef 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux<br />SLES/openSUSE<br />Ubuntu | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware |
+| Alpine Linux<br />ArchLinux<br />Debian<br />DragonFlyBSD<br />Fedora<br />FreeBSD<br />Gentoo Linux<br />NetBSD<br />OpenBSD<br />openEuler<br />RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux<br />SLES/openSUSE<br />Ubuntu | Amazon Web Services<br />Microsoft Azure<br />Google Cloud Platform<br />Oracle Cloud Infrastructure<br />Softlayer<br />Rackspace Public Cloud<br />IBM Cloud<br />DigitalOcean<br />Bigstep<br />Hetzner<br />Joyent<br />CloudSigma<br />Alibaba Cloud<br />OVH<br />OpenNebula<br />Exoscale<br />Scaleway<br />CloudStack<br />AltCloud<br />SmartOS<br />HyperOne<br />Vultr<br />Rootbox | Bare metal installs<br />OpenStack<br />LXD<br />KVM<br />Metal-as-a-Service (MAAS)<br />VMware
| ## To start developing cloud-init diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index e2231cbb..f4468c9d 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -25,8 +25,8 @@ frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian', - 'eurolinux', 'fedora', 'opensuse', 'photon', 'rhel', 'rocky', - 'sles', 'ubuntu', 'virtuozzo'] + 'eurolinux', 'fedora', 'openEuler', 'opensuse', 'photon', + 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] NTP_CLIENT_CONFIG = { 'chrony': { diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 899cb082..bcca86cb 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -19,7 +19,7 @@ entry, the config entry will be skipped. **Module frequency:** per always **Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora, - photon, rhel, rocky, virtuozzo + openEuler, photon, rhel, rocky, virtuozzo **Config keys**:: @@ -38,7 +38,7 @@ from configparser import ConfigParser from cloudinit import util distros = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora', - 'photon', 'rhel', 'rocky', 'virtuozzo'] + 'openEuler', 'photon', 'rhel', 'rocky', 'virtuozzo'] def _canonicalize_id(repo_id): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index a0526948..63e78591 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -50,7 +50,7 @@ OSFAMILIES = { 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], 'redhat': ['almalinux', 'amazon', 'centos', 'cloudlinux', 'eurolinux', - 'fedora', 'photon', 'rhel', 'rocky', 'virtuozzo'], + 'fedora', 'openEuler', 'photon', 'rhel', 'rocky', 'virtuozzo'], 'suse': ['opensuse', 'sles'], } diff --git a/cloudinit/distros/openEuler.py b/cloudinit/distros/openEuler.py new file mode 100644 index 00000000..edb3165d --- /dev/null +++ b/cloudinit/distros/openEuler.py @@ -0,0 +1,9 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.distros import rhel + + +class Distro(rhel.Distro): + pass + +# vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 7b8e4da7..ef4543b4 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -19,7 +19,7 @@ from .network_state import ( LOG = logging.getLogger(__name__) KNOWN_DISTROS = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora', - 'rhel', 'rocky', 'suse', 'virtuozzo'] + 'openEuler', 'rhel', 'rocky', 'suse', 'virtuozzo'] NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index f11cfb27..977ad8e0 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -200,6 +200,15 @@ OS_RELEASE_CLOUDLINUX_8 = dedent("""\ BUG_REPORT_URL="https://www.cloudlinux.com/support" """) +OS_RELEASE_OPENEULER_20 = dedent("""\ + NAME="openEuler" + VERSION="20.03 (LTS-SP2)" + ID="openEuler" + VERSION_ID="20.03" + PRETTY_NAME="openEuler 20.03 (LTS-SP2)" + ANSI_COLOR="0;31" +""") + REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)" REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)" REDHAT_RELEASE_REDHAT_6 = ( @@ -718,6 +727,14 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('debian', '9', 'stretch'), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_openeuler(self, m_os_release, m_path_exists): + """Verify get the correct name and release name on Openeuler.""" + m_os_release.return_value = OS_RELEASE_OPENEULER_20 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('openEuler', '20.03', 'LTS-SP2'), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_opensuse(self, m_os_release, m_path_exists): """Verify we get the correct name and machine arch on openSUSE diff --git a/cloudinit/util.py b/cloudinit/util.py index 894245bf..9662b30b 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -549,8 +549,8 @@ def system_info(): linux_dist = info['dist'][0].lower() if linux_dist in ( 'almalinux', 'alpine', 'arch', 'centos', 'cloudlinux', - 'debian', 'eurolinux', 'fedora', 'photon', 'rhel', 'rocky', - 'suse', 'virtuozzo'): + 'debian', 'eurolinux', 'fedora', 'openEuler', 'photon', + 'rhel', 'rocky', 'suse', 'virtuozzo'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index e5fbc10b..de1d75e5 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -33,7 +33,7 @@ disable_root: true {% endif %} {% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux", - "fedora", "rhel", "rocky", "virtuozzo"] %} + "fedora", "openEuler", "rhel", "rocky", "virtuozzo"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} resize_rootfs: noblock @@ -174,7 +174,7 @@ cloud_final_modules: system_info: # This will affect which distro class gets used {% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian", - "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", + "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "openEuler", "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %} distro: {{ variant }} {% elif variant in ["dragonfly"] %} @@ -229,7 +229,7 @@ system_info: security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh {% elif variant in ["almalinux", "alpine", "amazon", 
"arch", "centos", "cloudlinux", "eurolinux", - "fedora", "rhel", "rocky", "suse", "virtuozzo"] %} + "fedora", "openEuler", "rhel", "rocky", "suse", "virtuozzo"] %} # Default user name + that default users groups (if added/used) default_user: {% if variant == "amazon" %} diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 7fe009ec..7d1e7256 100644 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -83,8 +83,8 @@ default() { check_for_datasource() { local ds_rc="" -{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", "rhel", - "rocky", "virtuozzo"] %} +{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", + "openEuler", "rhel", "rocky", "virtuozzo"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} local dsidentify="/usr/lib/cloud-init/ds-identify" diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index 6c3a8f20..de3f3d91 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -12,8 +12,8 @@ After=systemd-networkd-wait-online.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=networking.service {% endif %} -{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", "rhel", - "rocky", "virtuozzo"] %} +{% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", + "openEuler", "rhel", "rocky", "virtuozzo"] %} After=network.service After=NetworkManager.service {% endif %} diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index b8ae9e47..1459fd9c 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -225,8 +225,8 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): expected_doc_sections = [ '**Supported distros:** all', ('**Supported distros:** almalinux, alpine, centos, cloudlinux, ' - 'debian, eurolinux, fedora, opensuse, photon, rhel, rocky, ' - 'sles, ubuntu, virtuozzo'), + 'debian, eurolinux, fedora, openEuler, opensuse, photon, rhel, ' + 'rocky, sles, ubuntu, virtuozzo'), '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' ] diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index d1939718..fa395553 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -68,3 +68,4 @@ Vultaire WebSpider xiachen-rh xnox +zhuzaifangxuele diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 78a48c30..186d61b7 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -5,7 +5,7 @@ import os import sys VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian", - "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "photon", + "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "openEuler", "photon", "rhel", "suse","rocky", "ubuntu", "unknown", "virtuozzo"] -- cgit v1.2.3 From f3cc94949d9f153b4a5135f8b989ff11b36ab7ea Mon Sep 17 00:00:00 2001 From: Shreenidhi Shedi <53473811+sshedi@users.noreply.github.com> Date: Tue, 14 Sep 2021 01:11:45 +0530 Subject: Improve ug_util.py (#1013) No functional changes. 
--- cloudinit/config/cc_set_passwords.py | 2 +- cloudinit/distros/ug_util.py | 234 ++++++++++++++--------------------- cloudinit/util.py | 2 +- 3 files changed, 93 insertions(+), 145 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 433de751..803a3aa9 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -133,7 +133,7 @@ def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"): def handle(_name, cfg, cloud, log, args): - if len(args) != 0: + if args: # if run from command line, and give args, wipe the chpasswd['list'] password = args[0] if 'chpasswd' in cfg and 'list' in cfg['chpasswd']: diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py index 08446a95..600b743f 100755 --- a/cloudinit/distros/ug_util.py +++ b/cloudinit/distros/ug_util.py @@ -16,77 +16,59 @@ from cloudinit import util LOG = logging.getLogger(__name__) -# Normalizes a input group configuration -# which can be a comma seperated list of -# group names, or a list of group names -# or a python dictionary of group names -# to a list of members of that group. +# Normalizes an input group configuration which can be: +# Comma seperated string or a list or a dictionary # -# The output is a dictionary of group -# names => members of that group which -# is the standard form used in the rest -# of cloud-init +# Returns dictionary of group names => members of that group which is the +# standard form used in the rest of cloud-init def _normalize_groups(grp_cfg): if isinstance(grp_cfg, str): - grp_cfg = grp_cfg.strip().split(",") + grp_cfg = grp_cfg.strip().split(',') + if isinstance(grp_cfg, list): c_grp_cfg = {} for i in grp_cfg: if isinstance(i, dict): for k, v in i.items(): - if k not in c_grp_cfg: - if isinstance(v, list): - c_grp_cfg[k] = list(v) - elif isinstance(v, str): - c_grp_cfg[k] = [v] - else: - raise TypeError("Bad group member type %s" % - type_utils.obj_name(v)) + if not isinstance(v, (list, str)): + raise TypeError('Bad group member type %s' + % (type_utils.obj_name(v))) + + if isinstance(v, list): + c_grp_cfg.setdefault(k, []).extend(v) else: - if isinstance(v, list): - c_grp_cfg[k].extend(v) - elif isinstance(v, str): - c_grp_cfg[k].append(v) - else: - raise TypeError("Bad group member type %s" % - type_utils.obj_name(v)) + c_grp_cfg.setdefault(k, []).append(v) elif isinstance(i, str): if i not in c_grp_cfg: c_grp_cfg[i] = [] else: - raise TypeError("Unknown group name type %s" % - type_utils.obj_name(i)) + raise TypeError('Unknown group name type %s' + % (type_utils.obj_name(i))) grp_cfg = c_grp_cfg + groups = {} if isinstance(grp_cfg, dict): - for (grp_name, grp_members) in grp_cfg.items(): + for grp_name, grp_members in grp_cfg.items(): groups[grp_name] = util.uniq_merge_sorted(grp_members) else: - raise TypeError(("Group config must be list, dict " - " or string types only and not %s") % - type_utils.obj_name(grp_cfg)) + raise TypeError(('Group config must be list, dict or string type only ' + 'but found %s') % (type_utils.obj_name(grp_cfg))) return groups -# Normalizes a input group configuration -# which can be a comma seperated list of -# user names, or a list of string user names -# or a list of dictionaries with components -# that define the user config + 'name' (if -# a 'name' field does not exist then the -# default user is assumed to 'own' that -# configuration. 
+# Normalizes an input group configuration which can be: a list or a dictionary # -# The output is a dictionary of user -# names => user config which is the standard -# form used in the rest of cloud-init. Note -# the default user will have a special config -# entry 'default' which will be marked as true -# all other users will be marked as false. +# components that define the user config + 'name' (if a 'name' field does not +# exist then the default user is assumed to 'own' that configuration.) +# +# Returns a dictionary of user names => user config which is the standard form +# used in the rest of cloud-init. Note the default user will have a special +# config entry 'default' which will be marked true and all other users will be +# marked false. def _normalize_users(u_cfg, def_user_cfg=None): if isinstance(u_cfg, dict): ad_ucfg = [] - for (k, v) in u_cfg.items(): + for k, v in u_cfg.items(): if isinstance(v, (bool, int, float, str)): if util.is_true(v): ad_ucfg.append(str(k)) @@ -94,8 +76,8 @@ def _normalize_users(u_cfg, def_user_cfg=None): v['name'] = k ad_ucfg.append(v) else: - raise TypeError(("Unmappable user value type %s" - " for key %s") % (type_utils.obj_name(v), k)) + raise TypeError(('Unmappable user value type %s for key %s') + % (type_utils.obj_name(v), k)) u_cfg = ad_ucfg elif isinstance(u_cfg, str): u_cfg = util.uniq_merge_sorted(u_cfg) @@ -107,181 +89,147 @@ def _normalize_users(u_cfg, def_user_cfg=None): if u and u not in users: users[u] = {} elif isinstance(user_config, dict): - if 'name' in user_config: - n = user_config.pop('name') - prev_config = users.get(n) or {} - users[n] = util.mergemanydict([prev_config, - user_config]) - else: - # Assume the default user then - prev_config = users.get('default') or {} - users['default'] = util.mergemanydict([prev_config, - user_config]) + n = user_config.pop('name', 'default') + prev_config = users.get(n) or {} + users[n] = util.mergemanydict([prev_config, user_config]) else: - raise TypeError(("User config must be dictionary/list " - " or string types only and not %s") % - type_utils.obj_name(user_config)) + raise TypeError(('User config must be dictionary/list or string ' + ' types only and not %s') + % (type_utils.obj_name(user_config))) # Ensure user options are in the right python friendly format if users: c_users = {} - for (uname, uconfig) in users.items(): + for uname, uconfig in users.items(): c_uconfig = {} - for (k, v) in uconfig.items(): + for k, v in uconfig.items(): k = k.replace('-', '_').strip() if k: c_uconfig[k] = v c_users[uname] = c_uconfig users = c_users - # Fixup the default user into the real - # default user name and replace it... + # Fix the default user into the actual default user name and replace it. 
def_user = None if users and 'default' in users: def_config = users.pop('default') if def_user_cfg: - # Pickup what the default 'real name' is - # and any groups that are provided by the - # default config + # Pickup what the default 'real name' is and any groups that are + # provided by the default config def_user_cfg = def_user_cfg.copy() def_user = def_user_cfg.pop('name') def_groups = def_user_cfg.pop('groups', []) - # Pickup any config + groups for that user name - # that we may have previously extracted + # Pick any config + groups for the user name that we may have + # extracted previously parsed_config = users.pop(def_user, {}) parsed_groups = parsed_config.get('groups', []) - # Now merge our extracted groups with - # anything the default config provided + # Now merge the extracted groups with the default config provided users_groups = util.uniq_merge_sorted(parsed_groups, def_groups) - parsed_config['groups'] = ",".join(users_groups) - # The real config for the default user is the - # combination of the default user config provided - # by the distro, the default user config provided - # by the above merging for the user 'default' and - # then the parsed config from the user's 'real name' - # which does not have to be 'default' (but could be) - users[def_user] = util.mergemanydict([def_user_cfg, - def_config, + parsed_config['groups'] = ','.join(users_groups) + # The real config for the default user is the combination of the + # default user config provided by the distro, the default user + # config provided by the above merging for the user 'default' and + # then the parsed config from the user's 'real name' which does not + # have to be 'default' (but could be) + users[def_user] = util.mergemanydict([def_user_cfg, def_config, parsed_config]) - # Ensure that only the default user that we - # found (if any) is actually marked as being - # the default user - if users: - for (uname, uconfig) in users.items(): - if def_user and uname == def_user: - uconfig['default'] = True - else: - uconfig['default'] = False + # Ensure that only the default user that we found (if any) is actually + # marked as the default user + for uname, uconfig in users.items(): + uconfig['default'] = (uname == def_user if def_user else False) return users -# Normalizes a set of user/users and group -# dictionary configuration into a useable -# format that the rest of cloud-init can -# understand using the default user -# provided by the input distrobution (if any) -# to allow for mapping of the 'default' user. +# Normalizes a set of user/users and group dictionary configuration into an +# usable format so that the rest of cloud-init can understand using the default +# user provided by the input distribution (if any) to allow mapping of the +# 'default' user. # # Output is a dictionary of group names -> [member] (list) # and a dictionary of user names -> user configuration (dict) # -# If 'user' exists it will override -# the 'users'[0] entry (if a list) otherwise it will -# just become an entry in the returned dictionary (no override) +# If 'user' exists, it will override +# The 'users'[0] entry (if a list) otherwise it will just become an entry in +# the returned dictionary (no override) def normalize_users_groups(cfg, distro): if not cfg: cfg = {} - users = {} - groups = {} - if 'groups' in cfg: - groups = _normalize_groups(cfg['groups']) - # Handle the previous style of doing this where the first user # overrides the concept of the default user if provided in the user: XYZ # format. 
old_user = {} if 'user' in cfg and cfg['user']: old_user = cfg['user'] - # Translate it into the format that is more useful - # going forward + # Translate it into a format that will be more useful going forward if isinstance(old_user, str): - old_user = { - 'name': old_user, - } - if not isinstance(old_user, dict): + old_user = {'name': old_user} + elif not isinstance(old_user, dict): LOG.warning(("Format for 'user' key must be a string or dictionary" " and not %s"), type_utils.obj_name(old_user)) old_user = {} - # If no old user format, then assume the distro - # provides what the 'default' user maps to, but notice - # that if this is provided, we won't automatically inject - # a 'default' user into the users list, while if a old user - # format is provided we will. + # If no old user format, then assume the distro provides what the 'default' + # user maps to, but notice that if this is provided, we won't automatically + # inject a 'default' user into the users list, while if an old user format + # is provided we will. distro_user_config = {} try: distro_user_config = distro.get_default_user() except NotImplementedError: - LOG.warning(("Distro has not implemented default user " - "access. No distribution provided default user" - " will be normalized.")) + LOG.warning(('Distro has not implemented default user access. No ' + 'distribution provided default user will be normalized.')) - # Merge the old user (which may just be an empty dict when not - # present with the distro provided default user configuration so - # that the old user style picks up all the distribution specific - # attributes (if any) + # Merge the old user (which may just be an empty dict when not present) + # with the distro provided default user configuration so that the old user + # style picks up all the distribution specific attributes (if any) default_user_config = util.mergemanydict([old_user, distro_user_config]) base_users = cfg.get('users', []) if not isinstance(base_users, (list, dict, str)): LOG.warning(("Format for 'users' key must be a comma separated string" - " or a dictionary or a list and not %s"), + " or a dictionary or a list but found %s"), type_utils.obj_name(base_users)) base_users = [] if old_user: - # Ensure that when user: is provided that this user - # always gets added (as the default user) + # When 'user:' is provided, it should be made as the default user if isinstance(base_users, list): - # Just add it on at the end... 
base_users.append({'name': 'default'}) elif isinstance(base_users, dict): base_users['default'] = dict(base_users).get('default', True) elif isinstance(base_users, str): - # Just append it on to be re-parsed later - base_users += ",default" + base_users += ',default' + + groups = {} + if 'groups' in cfg: + groups = _normalize_groups(cfg['groups']) users = _normalize_users(base_users, default_user_config) return (users, groups) -# Given a user dictionary config it will -# extract the default user name and user config -# from that list and return that tuple or -# return (None, None) if no default user is -# found in the given input +# Given a user dictionary config, extract the default user name and user config +# and return them or return (None, None) if no default user is found def extract_default(users, default_name=None, default_config=None): if not users: - users = {} + return (default_name, default_config) def safe_find(entry): config = entry[1] if not config or 'default' not in config: return False - else: - return config['default'] + return config['default'] - tmp_users = users.items() - tmp_users = dict(filter(safe_find, tmp_users)) + tmp_users = dict(filter(safe_find, users.items())) if not tmp_users: return (default_name, default_config) - else: - name = list(tmp_users)[0] - config = tmp_users[name] - config.pop('default', None) - return (name, config) + + name = list(tmp_users)[0] + config = tmp_users[name] + config.pop('default', None) + return (name, config) # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 9662b30b..22d8917e 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -297,7 +297,7 @@ def uniq_merge(*lists): if isinstance(a_list, str): a_list = a_list.strip().split(",") # Kickout the empty ones - a_list = [a for a in a_list if len(a)] + a_list = [a for a in a_list if a] combined_list.extend(a_list) return uniq_list(combined_list) -- cgit v1.2.3 From 5ea2c669d6e8a9ab30f3107bee45cecc5fa1b081 Mon Sep 17 00:00:00 2001 From: PengpengSun <40026211+PengpengSun@users.noreply.github.com> Date: Fri, 17 Sep 2021 01:43:59 +0800 Subject: VMware: Fix typo introduced in #947 and add test (#1019) --- cloudinit/sources/DataSourceOVF.py | 2 +- tests/unittests/test_datasource/test_ovf.py | 43 +++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index e909f058..3e436dfa 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -358,7 +358,7 @@ class DataSourceOVF(sources.DataSource): if contents: break if contents: - read_network = ('com.vmware.guestinfo' == name) + read_network = ('com.vmware.guestInfo' == name) (md, ud, cfg) = read_ovf_environment(contents, read_network) self.environment = contents if 'network-config' in md and md['network-config']: diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py index 9f52b504..2ca10781 100644 --- a/tests/unittests/test_datasource/test_ovf.py +++ b/tests/unittests/test_datasource/test_ovf.py @@ -518,6 +518,49 @@ class TestDatasourceOVF(CiTestCase): 'vmware (%s/seed/ovf-env.xml)' % self.tdir, ds.subplatform) + def test_get_data_vmware_guestinfo_with_network_config(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 
10.10.10.1/24 + """) + network_config_b64 = base64.b64encode(network_config.encode()).decode() + props = {"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + with mock.patch(MPATH + 'transport_vmware_guestinfo', + return_value=env): + with mock.patch(MPATH + 'transport_iso9660', + return_value=NOT_FOUND): + self.assertTrue(ds.get_data()) + self.assertEqual('inst-001', ds.metadata['instance-id']) + self.assertEqual( + {'version': 2, 'ethernets': + {'nics': + {'nameservers': + {'addresses': ['127.0.0.53'], + 'search': ['vmware.com']}, + 'match': {'name': 'eth*'}, + 'gateway4': '10.10.10.253', + 'dhcp4': False, + 'addresses': ['10.10.10.1/24']}}}, + ds.network_config) + def test_get_data_cloudinit_metadata_json(self): """Test metadata can be loaded to cloud-init metadata and network. The metadata format is json. -- cgit v1.2.3 From cb82a4508a4c56c3814fa633166d944762071bcf Mon Sep 17 00:00:00 2001 From: Renan Rodrigo Date: Fri, 17 Sep 2021 10:06:49 -0300 Subject: docs: fix typo and include sudo for report bugs commands (#1022) Remove a duplicate "a" in the docs, and change the bug reporting documentation to tell users to run the commands with sudo. LP: #1940236 --- cloudinit/config/cc_ssh.py | 2 +- doc/rtd/topics/bugs.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 05a16dbc..43f64290 100755 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -17,7 +17,7 @@ keys. Authorized Keys ^^^^^^^^^^^^^^^ -Authorized keys are a list of public SSH keys that are allowed to connect to a +Authorized keys are a list of public SSH keys that are allowed to connect to a user account on a system. They are stored in `.ssh/authorized_keys` in that account's home directory. Authorized keys for the default user defined in ``users`` can be specified using ``ssh_authorized_keys``. Keys diff --git a/doc/rtd/topics/bugs.rst b/doc/rtd/topics/bugs.rst index 4b60776b..ee3828de 100644 --- a/doc/rtd/topics/bugs.rst +++ b/doc/rtd/topics/bugs.rst @@ -17,7 +17,7 @@ To aid in debugging, please collect the necessary logs. To do so, run the .. code-block:: shell-session - $ cloud-init collect-logs + $ sudo cloud-init collect-logs Wrote /home/ubuntu/cloud-init.tar.gz If your version of cloud-init does not have the `collect-logs` subcommand, @@ -25,7 +25,7 @@ then please manually collect the base log files by doing the following: .. code-block:: shell-session - $ dmesg > dmesg.txt + $ sudo dmesg > dmesg.txt $ sudo journalctl -o short-precise > journal.txt $ sudo tar -cvf cloud-init.tar dmesg.txt journal.txt /run/cloud-init \ /var/log/cloud-init.log /var/log/cloud-init-output.log -- cgit v1.2.3 From 612e39087aee3b1242765e7c4f463f54a6ebd723 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 17 Sep 2021 13:04:07 -0500 Subject: Add connectivity_url to Oracle's EphemeralDHCPv4 (#988) Add connectivity_url to Oracle's EphemeralDHCPv4 On bionic, when trying to bring up the EphemeralDHCPv4, it's possible that we already have a route defined, which will result in an error when trying to add the DHCP route. Use the connectivity_url to check if we can reach the metadata service, and if so, skip the EphemeralDHCPv4. The has_url_connectivity function has also been modified to take a dict of kwargs to send to readurl. 
LP: #1939603 --- cloudinit/net/__init__.py | 37 +++++++++++++++++++++++++--------- cloudinit/net/dhcp.py | 20 +++++++++++------- cloudinit/net/tests/test_dhcp.py | 8 ++++++-- cloudinit/net/tests/test_init.py | 20 +++++++++++------- cloudinit/sources/DataSourceOracle.py | 13 ++++++++---- cloudinit/sources/helpers/vultr.py | 2 +- cloudinit/sources/tests/test_oracle.py | 10 ++++++++- 7 files changed, 78 insertions(+), 32 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 017c50c5..7558745f 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -11,6 +11,7 @@ import ipaddress import logging import os import re +from typing import Any, Dict from cloudinit import subp from cloudinit import util @@ -971,18 +972,33 @@ def get_ib_hwaddrs_by_interface(): return ret -def has_url_connectivity(url): - """Return true when the instance has access to the provided URL +def has_url_connectivity(url_data: Dict[str, Any]) -> bool: + """Return true when the instance has access to the provided URL. Logs a warning if url is not the expected format. + + url_data is a dictionary of kwargs to send to readurl. E.g.: + + has_url_connectivity({ + "url": "http://example.invalid", + "headers": {"some": "header"}, + "timeout": 10 + }) """ + if 'url' not in url_data: + LOG.warning( + "Ignoring connectivity check. No 'url' to check in %s", url_data) + return False + url = url_data['url'] if not any([url.startswith('http://'), url.startswith('https://')]): LOG.warning( "Ignoring connectivity check. Expected URL beginning with http*://" " received '%s'", url) return False + if 'timeout' not in url_data: + url_data['timeout'] = 5 try: - readurl(url, timeout=5) + readurl(**url_data) except UrlError: return False return True @@ -1025,14 +1041,15 @@ class EphemeralIPv4Network(object): No operations are performed if the provided interface already has the specified configuration. - This can be verified with the connectivity_url. + This can be verified with the connectivity_url_data. If unconnected, bring up the interface with valid ip, prefix and broadcast. If router is provided setup a default route for that interface. Upon context exit, clean up the interface leaving no configuration behind. """ def __init__(self, interface, ip, prefix_or_mask, broadcast, router=None, - connectivity_url=None, static_routes=None): + connectivity_url_data: Dict[str, Any] = None, + static_routes=None): """Setup context manager and validate call signature. @param interface: Name of the network interface to bring up. @@ -1041,7 +1058,7 @@ class EphemeralIPv4Network(object): prefix. @param broadcast: Broadcast address for the IPv4 network. @param router: Optionally the default gateway IP. - @param connectivity_url: Optionally, a URL to verify if a usable + @param connectivity_url_data: Optionally, a URL to verify if a usable connection already exists. 
@param static_routes: Optionally a list of static routes from DHCP """ @@ -1056,7 +1073,7 @@ class EphemeralIPv4Network(object): 'Cannot setup network: {0}'.format(e) ) from e - self.connectivity_url = connectivity_url + self.connectivity_url_data = connectivity_url_data self.interface = interface self.ip = ip self.broadcast = broadcast @@ -1066,11 +1083,11 @@ class EphemeralIPv4Network(object): def __enter__(self): """Perform ephemeral network setup if interface is not connected.""" - if self.connectivity_url: - if has_url_connectivity(self.connectivity_url): + if self.connectivity_url_data: + if has_url_connectivity(self.connectivity_url_data): LOG.debug( 'Skip ephemeral network setup, instance has connectivity' - ' to %s', self.connectivity_url) + ' to %s', self.connectivity_url_data['url']) return self._bringup_device() diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 9b94c9a0..3f4b0418 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -4,6 +4,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. +from typing import Dict, Any import configobj import logging import os @@ -38,21 +39,26 @@ class NoDHCPLeaseError(Exception): class EphemeralDHCPv4(object): - def __init__(self, iface=None, connectivity_url=None, dhcp_log_func=None): + def __init__( + self, + iface=None, + connectivity_url_data: Dict[str, Any] = None, + dhcp_log_func=None + ): self.iface = iface self._ephipv4 = None self.lease = None self.dhcp_log_func = dhcp_log_func - self.connectivity_url = connectivity_url + self.connectivity_url_data = connectivity_url_data def __enter__(self): """Setup sandboxed dhcp context, unless connectivity_url can already be reached.""" - if self.connectivity_url: - if has_url_connectivity(self.connectivity_url): + if self.connectivity_url_data: + if has_url_connectivity(self.connectivity_url_data): LOG.debug( 'Skip ephemeral DHCP setup, instance has connectivity' - ' to %s', self.connectivity_url) + ' to %s', self.connectivity_url_data) return return self.obtain_lease() @@ -104,8 +110,8 @@ class EphemeralDHCPv4(object): if kwargs['static_routes']: kwargs['static_routes'] = ( parse_static_routes(kwargs['static_routes'])) - if self.connectivity_url: - kwargs['connectivity_url'] = self.connectivity_url + if self.connectivity_url_data: + kwargs['connectivity_url_data'] = self.connectivity_url_data ephipv4 = EphemeralIPv4Network(**kwargs) ephipv4.__enter__() self._ephipv4 = ephipv4 diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py index 5ae048e2..28b4ecf7 100644 --- a/cloudinit/net/tests/test_dhcp.py +++ b/cloudinit/net/tests/test_dhcp.py @@ -617,7 +617,9 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): url = 'http://example.org/index.html' httpretty.register_uri(httpretty.GET, url) - with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease: + with net.dhcp.EphemeralDHCPv4( + connectivity_url_data={'url': url}, + ) as lease: self.assertIsNone(lease) # Ensure that no teardown happens: m_dhcp.assert_not_called() @@ -635,7 +637,9 @@ class TestEphemeralDhcpNoNetworkSetup(HttprettyTestCase): m_subp.return_value = ('', '') httpretty.register_uri(httpretty.GET, url, body={}, status=404) - with net.dhcp.EphemeralDHCPv4(connectivity_url=url) as lease: + with net.dhcp.EphemeralDHCPv4( + connectivity_url_data={'url': url}, + ) as lease: self.assertEqual(fake_lease, lease) # Ensure that dhcp discovery occurs m_dhcp.called_once_with() diff --git a/cloudinit/net/tests/test_init.py 
b/cloudinit/net/tests/test_init.py index ad9c90ff..f9102f7b 100644 --- a/cloudinit/net/tests/test_init.py +++ b/cloudinit/net/tests/test_init.py @@ -622,11 +622,14 @@ class TestEphemeralIPV4Network(CiTestCase): params = { 'interface': 'eth0', 'ip': '192.168.2.2', 'prefix_or_mask': '255.255.255.0', 'broadcast': '192.168.2.255', - 'connectivity_url': 'http://example.org/index.html'} + 'connectivity_url_data': {'url': 'http://example.org/index.html'} + } with net.EphemeralIPv4Network(**params): - self.assertEqual([mock.call('http://example.org/index.html', - timeout=5)], m_readurl.call_args_list) + self.assertEqual( + [mock.call(url='http://example.org/index.html', timeout=5)], + m_readurl.call_args_list + ) # Ensure that no teardown happens: m_subp.assert_has_calls([]) @@ -850,25 +853,28 @@ class TestHasURLConnectivity(HttprettyTestCase): def test_url_timeout_on_connectivity_check(self, m_readurl): """A timeout of 5 seconds is provided when reading a url.""" self.assertTrue( - net.has_url_connectivity(self.url), 'Expected True on url connect') + net.has_url_connectivity({'url': self.url}), + 'Expected True on url connect') def test_true_on_url_connectivity_success(self): httpretty.register_uri(httpretty.GET, self.url) self.assertTrue( - net.has_url_connectivity(self.url), 'Expected True on url connect') + net.has_url_connectivity({'url': self.url}), + 'Expected True on url connect') @mock.patch('requests.Session.request') def test_true_on_url_connectivity_timeout(self, m_request): """A timeout raised accessing the url will return False.""" m_request.side_effect = requests.Timeout('Fake Connection Timeout') self.assertFalse( - net.has_url_connectivity(self.url), + net.has_url_connectivity({'url': self.url}), 'Expected False on url timeout') def test_true_on_url_connectivity_failure(self): httpretty.register_uri(httpretty.GET, self.url, body={}, status=404) self.assertFalse( - net.has_url_connectivity(self.url), 'Expected False on url fail') + net.has_url_connectivity({'url': self.url}), + 'Expected False on url fail') def _mk_v1_phys(mac, name, driver, device_id): diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index bf81b10b..fbb5312a 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -40,6 +40,7 @@ METADATA_PATTERN = METADATA_ROOT + "{path}/" # https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview, # indicates that an MTU of 9000 is used within OCI MTU = 9000 +V2_HEADERS = {"Authorization": "Bearer Oracle"} OpcMetadata = namedtuple("OpcMetadata", "version instance_data vnics_data") @@ -134,7 +135,13 @@ class DataSourceOracle(sources.DataSource): ) network_context = noop() if not _is_iscsi_root(): - network_context = dhcp.EphemeralDHCPv4(net.find_fallback_nic()) + network_context = dhcp.EphemeralDHCPv4( + iface=net.find_fallback_nic(), + connectivity_url_data={ + "url": METADATA_PATTERN.format(version=2, path="instance"), + "headers": V2_HEADERS, + } + ) with network_context: fetched_metadata = read_opc_metadata( fetch_vnics_data=fetch_vnics_data @@ -304,11 +311,9 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False): retries = 2 def _fetch(metadata_version: int, path: str) -> dict: - headers = { - "Authorization": "Bearer Oracle"} if metadata_version > 1 else None return readurl( url=METADATA_PATTERN.format(version=metadata_version, path=path), - headers=headers, + headers=V2_HEADERS if metadata_version > 1 else None, retries=retries, )._response.json() diff 
--git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py index c22cd0b1..2521ec2f 100644 --- a/cloudinit/sources/helpers/vultr.py +++ b/cloudinit/sources/helpers/vultr.py @@ -20,7 +20,7 @@ LOG = log.getLogger(__name__) def get_metadata(url, timeout, retries, sec_between): # Bring up interface try: - with EphemeralDHCPv4(connectivity_url=url): + with EphemeralDHCPv4(connectivity_url_data={"url": url}): # Fetch the metadata v1 = read_metadata(url, timeout, retries, sec_between) except (NoDHCPLeaseError) as exc: diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index dcf33b9b..5f608cbb 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -694,7 +694,15 @@ class TestNonIscsiRoot_GetDataBehaviour: assert oracle_ds._get_data() assert [ - mock.call(m_find_fallback_nic.return_value) + mock.call( + iface=m_find_fallback_nic.return_value, + connectivity_url_data={ + 'headers': { + 'Authorization': 'Bearer Oracle' + }, + 'url': 'http://169.254.169.254/opc/v2/instance/' + } + ) ] == m_EphemeralDHCPv4.call_args_list -- cgit v1.2.3 From dc22786980a05129c5971e68ae37b1a9f76f882d Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 17 Sep 2021 16:25:22 -0500 Subject: Set Azure to apply networking config every BOOT (#1023) In #1006, we set Azure to apply networking config every BOOT_NEW_INSTANCE because the BOOT_LEGACY option was causing problems applying networking the second time per boot. However, BOOT_NEW_INSTANCE is also wrong as Azure needs to apply networking once per boot, during init-local phase. --- cloudinit/sources/DataSourceAzure.py | 6 +++++- tests/integration_tests/modules/test_user_events.py | 10 ++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 3fb564c8..f8641dfd 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,7 +22,7 @@ import requests from cloudinit import dmi from cloudinit import log as logging from cloudinit import net -from cloudinit.event import EventType +from cloudinit.event import EventScope, EventType from cloudinit.net import device_driver from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources @@ -339,6 +339,10 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): class DataSourceAzure(sources.DataSource): dsname = 'Azure' + default_update_events = {EventScope.NETWORK: { + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT, + }} _negotiated = False _metadata_imds = sources.UNSET _ci_pkl_version = 1 diff --git a/tests/integration_tests/modules/test_user_events.py b/tests/integration_tests/modules/test_user_events.py index ee8f05ae..fffa0746 100644 --- a/tests/integration_tests/modules/test_user_events.py +++ b/tests/integration_tests/modules/test_user_events.py @@ -31,8 +31,6 @@ def _add_dummy_bridge_to_netplan(client: IntegrationInstance): @pytest.mark.gce @pytest.mark.oci @pytest.mark.openstack -@pytest.mark.azure -@pytest.mark.not_xenial def test_boot_event_disabled_by_default(client: IntegrationInstance): log = client.read_from_file('/var/log/cloud-init.log') if 'network config is disabled' in log: @@ -77,7 +75,7 @@ def _test_network_config_applied_on_reboot(client: IntegrationInstance): assert 'dummy0' not in client.execute('ls /sys/class/net') _add_dummy_bridge_to_netplan(client) - client.execute('rm /var/log/cloud-init.log') + 
client.execute('echo "" > /var/log/cloud-init.log') client.restart() log = client.read_from_file('/var/log/cloud-init.log') @@ -92,6 +90,11 @@ def _test_network_config_applied_on_reboot(client: IntegrationInstance): assert 'dummy0' not in client.execute('ls /sys/class/net') +@pytest.mark.azure +def test_boot_event_enabled_by_default(client: IntegrationInstance): + _test_network_config_applied_on_reboot(client) + + USER_DATA = """\ #cloud-config updates: @@ -100,7 +103,6 @@ updates: """ -@pytest.mark.not_xenial @pytest.mark.user_data(USER_DATA) def test_boot_event_enabled(client: IntegrationInstance): _test_network_config_applied_on_reboot(client) -- cgit v1.2.3 From e27c30748e88409b1646a552f994edf9ed9d017e Mon Sep 17 00:00:00 2001 From: vteratipally <67723486+vteratipally@users.noreply.github.com> Date: Mon, 20 Sep 2021 21:53:05 -0700 Subject: Add retries to DataSourceGCE.py when connecting to GCE (#1005) Add retries to DatasourceGCE when connecting to GCE. Sometimes when the trying to fetch the metadata, cloud-init fails and the fallback datasource NoCloud is used which is not expected. Add retries to ensure loading of the data source. --- cloudinit/sources/DataSourceGCE.py | 19 +++++++++++++------ cloudinit/sources/__init__.py | 23 ++++++++++++++++++----- cloudinit/sources/tests/test_init.py | 17 +++++++++++------ doc/rtd/topics/datasources/gce.rst | 22 ++++++++++++++++++++++ tools/.github-cla-signers | 1 + 5 files changed, 65 insertions(+), 17 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 746caddb..ecdc458b 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -27,8 +27,10 @@ HEADERS = {'Metadata-Flavor': 'Google'} class GoogleMetadataFetcher(object): - def __init__(self, metadata_address): + def __init__(self, metadata_address, num_retries, sec_between_retries): self.metadata_address = metadata_address + self.num_retries = num_retries + self.sec_between_retries = sec_between_retries def get_value(self, path, is_text, is_recursive=False): value = None @@ -36,7 +38,9 @@ class GoogleMetadataFetcher(object): url = self.metadata_address + path if is_recursive: url += '/?recursive=True' - resp = url_helper.readurl(url=url, headers=HEADERS) + resp = url_helper.readurl(url=url, headers=HEADERS, + retries=self.num_retries, + sec_between=self.sec_between_retries) except url_helper.UrlError as exc: msg = "url %s raised exception %s" LOG.debug(msg, path, exc) @@ -68,9 +72,11 @@ class DataSourceGCE(sources.DataSource): self.metadata_address = self.ds_cfg['metadata_url'] def _get_data(self): + url_params = self.get_url_params() ret = util.log_time( LOG.debug, 'Crawl of GCE metadata service', - read_md, kwargs={'address': self.metadata_address}) + read_md, kwargs={'address': self.metadata_address, + 'url_params': url_params}) if not ret['success']: if ret['platform_reports_gce']: @@ -176,7 +182,7 @@ def _parse_public_keys(public_keys_data, default_user=None): return public_keys -def read_md(address=None, platform_check=True): +def read_md(address=None, url_params=None, platform_check=True): if address is None: address = MD_V1_URL @@ -203,8 +209,9 @@ def read_md(address=None, platform_check=True): ('instance-data', ('instance/attributes',), False, False, True), ('project-data', ('project/attributes',), False, False, True), ] - - metadata_fetcher = GoogleMetadataFetcher(address) + metadata_fetcher = GoogleMetadataFetcher(address, + url_params.num_retries, + 
url_params.sec_between_retries) md = {} # Iterate over url_map keys to get metadata items. for (mkey, paths, required, is_text, is_recursive) in url_map: diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 54b8240a..d61d280d 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -138,7 +138,8 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE): URLParams = namedtuple( - 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries']) + 'URLParms', ['max_wait_seconds', 'timeout_seconds', + 'num_retries', 'sec_between_retries']) class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): @@ -175,9 +176,10 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): NetworkConfigSource.ds) # read_url_params - url_max_wait = -1 # max_wait < 0 means do not wait - url_timeout = 10 # timeout for each metadata url read attempt - url_retries = 5 # number of times to retry url upon 404 + url_max_wait = -1 # max_wait < 0 means do not wait + url_timeout = 10 # timeout for each metadata url read attempt + url_retries = 5 # number of times to retry url upon 404 + url_sec_between_retries = 1 # amount of seconds to wait between retries # The datasource defines a set of supported EventTypes during which # the datasource can react to changes in metadata and regenerate @@ -422,7 +424,18 @@ class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta): LOG, "Config retries '%s' is not an int, using default '%s'", self.ds_cfg.get('retries'), retries) - return URLParams(max_wait, timeout, retries) + sec_between_retries = self.url_sec_between_retries + try: + sec_between_retries = int(self.ds_cfg.get( + "sec_between_retries", + self.url_sec_between_retries)) + except Exception: + util.logexc( + LOG, "Config sec_between_retries '%s' is not an int," + " using default '%s'", + self.ds_cfg.get("sec_between_retries"), sec_between_retries) + + return URLParams(max_wait, timeout, retries, sec_between_retries) def get_userdata(self, apply_filter=False): if self.userdata is None: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index a2b052a6..ae09cb17 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -97,6 +97,8 @@ class TestDataSource(CiTestCase): self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait) self.assertEqual(params.timeout_seconds, self.datasource.url_timeout) self.assertEqual(params.num_retries, self.datasource.url_retries) + self.assertEqual(params.sec_between_retries, + self.datasource.url_sec_between_retries) def test_datasource_get_url_params_subclassed(self): """Subclasses can override get_url_params defaults.""" @@ -104,7 +106,7 @@ class TestDataSource(CiTestCase): distro = 'distrotest' # generally should be a Distro object datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths) expected = (datasource.url_max_wait, datasource.url_timeout, - datasource.url_retries) + datasource.url_retries, datasource.url_sec_between_retries) url_params = datasource.get_url_params() self.assertNotEqual(self.datasource.get_url_params(), url_params) self.assertEqual(expected, url_params) @@ -114,14 +116,16 @@ class TestDataSource(CiTestCase): sys_cfg = { 'datasource': { 'MyTestSubclass': { - 'max_wait': '1', 'timeout': '2', 'retries': '3'}}} + 'max_wait': '1', 'timeout': '2', + 'retries': '3', 'sec_between_retries': 4 + }}} datasource = DataSourceTestSubclassNet( sys_cfg, self.distro, self.paths) - expected = (1, 2, 3) 
+ expected = (1, 2, 3, 4) url_params = datasource.get_url_params() self.assertNotEqual( (datasource.url_max_wait, datasource.url_timeout, - datasource.url_retries), + datasource.url_retries, datasource.url_sec_between_retries), url_params) self.assertEqual(expected, url_params) @@ -130,7 +134,8 @@ # Set an override that is below 0 which gets ignored. sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}} datasource = DataSource(sys_cfg, self.distro, self.paths) - (_max_wait, timeout, _retries) = datasource.get_url_params() + (_max_wait, timeout, _retries, + _sec_between_retries) = datasource.get_url_params() self.assertEqual(0, timeout) def test_datasource_get_url_uses_defaults_on_errors(self): @@ -142,7 +147,7 @@ datasource = DataSource(sys_cfg, self.distro, self.paths) url_params = datasource.get_url_params() expected = (datasource.url_max_wait, datasource.url_timeout, - datasource.url_retries) + datasource.url_retries, datasource.url_sec_between_retries) self.assertEqual(expected, url_params) logs = self.logs.getvalue() expected_logs = [ diff --git a/doc/rtd/topics/datasources/gce.rst b/doc/rtd/topics/datasources/gce.rst index 8406695c..f3590282 100644 --- a/doc/rtd/topics/datasources/gce.rst +++ b/doc/rtd/topics/datasources/gce.rst @@ -15,6 +15,28 @@ to provide ``public-keys``. ``user-data`` and ``user-data-encoding`` can be provided to cloud-init by setting those custom metadata keys for an *instance*. +Configuration +------------- +The following configuration can be set for the datasource in system +configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). + +The settings that may be configured are: + + * **retries**: The number of retries that should be done for an http request. + This value is used only after metadata_url is selected. (default: 5) + * **sec_between_retries**: The amount of wait time between the retries when + crawling the metadata service. (default: 1) + + +An example configuration with the default values is provided below: + +.. sourcecode:: yaml + + datasource: + GCE: + retries: 5 + sec_between_retries: 1 + .. _GCE metadata docs: https://cloud.google.com/compute/docs/storing-retrieving-metadata#querying .. vi: textwidth=78 diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index d9d43ba9..4f668112 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -66,6 +66,7 @@ timothegenzmer tnt-dev tomponline tsanghan +vteratipally Vultaire WebSpider xiachen-rh -- cgit v1.2.3 From 089a307db1fc572461eea1589f1876132c058311 Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Tue, 21 Sep 2021 22:28:30 +0200 Subject: tox: bump the pinned flake8 and pylint version (#1029) tox: bump the pinned flake8 and pylint version * pylint: fix W1406 (redundant-u-string-prefix) The u prefix for strings is no longer necessary in Python >=3.0. * pylint: disable W1514 (unspecified-encoding) From https://www.python.org/dev/peps/pep-0597/ (Python 3.10): The new warning stems from https://www.python.org/dev/peps/pep-0597, which says: Developers using macOS or Linux may forget that the default encoding is not always UTF-8. [...] Even Python experts may assume that the default encoding is UTF-8. This creates bugs that only happen on Windows. The warning could be fixed by always specifying encoding='utf-8', however we should be careful to not break environments which are not utf-8 (or explicitly state that only utf-8 is supported). Let's silence the warning for now. 
* _quick_read_instance_id: cover the case where load_yaml() returns None Spotted by pylint: - E1135 (unsupported-membership-test) - E1136 (unsubscriptable-object) LP: #1944414 --- .pylintrc | 3 ++- cloudinit/reporting/handlers.py | 10 +++++----- cloudinit/safeyaml.py | 2 +- cloudinit/sources/DataSourceNoCloud.py | 2 +- cloudinit/templater.py | 4 ++-- cloudinit/tests/test_subp.py | 8 ++++---- tests/unittests/test_datasource/test_azure.py | 6 +++--- .../unittests/test_datasource/test_configdrive.py | 2 +- tests/unittests/test_datasource/test_openstack.py | 2 +- tests/unittests/test_datasource/test_scaleway.py | 22 +++++++++++----------- .../test_handler/test_handler_apt_source_v3.py | 2 +- tests/unittests/test_handler/test_handler_debug.py | 2 +- tests/unittests/test_util.py | 2 +- tox.ini | 4 ++-- 14 files changed, 36 insertions(+), 35 deletions(-) (limited to 'cloudinit') diff --git a/.pylintrc b/.pylintrc index 94a81d0e..3edb0092 100644 --- a/.pylintrc +++ b/.pylintrc @@ -24,8 +24,9 @@ jobs=4 # W0631(undefined-loop-variable) # W0703(broad-except) # W1401(anomalous-backslash-in-string) +# W1514(unspecified-encoding) -disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401 +disable=C, F, I, R, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401, W1514 [REPORTS] diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 0a8c7af3..e32739ef 100755 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -137,8 +137,8 @@ class HyperVKvpReportingHandler(ReportingHandler): self._event_types = event_types self.q = queue.Queue() self.incarnation_no = self._get_incarnation_no() - self.event_key_prefix = u"{0}|{1}".format(self.EVENT_PREFIX, - self.incarnation_no) + self.event_key_prefix = "{0}|{1}".format(self.EVENT_PREFIX, + self.incarnation_no) self.publish_thread = threading.Thread( target=self._publish_event_routine ) @@ -200,9 +200,9 @@ class HyperVKvpReportingHandler(ReportingHandler): CLOUD_INIT|||| [|subevent_index] """ - return u"{0}|{1}|{2}|{3}".format(self.event_key_prefix, - event.event_type, event.name, - uuid.uuid4()) + return "{0}|{1}|{2}|{3}".format(self.event_key_prefix, + event.event_type, event.name, + uuid.uuid4()) def _encode_kvp_item(self, key, value): data = struct.pack( diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index d6f5f95b..b95df27d 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -15,7 +15,7 @@ class _CustomSafeLoader(yaml.SafeLoader): _CustomSafeLoader.add_constructor( - u'tag:yaml.org,2002:python/unicode', + 'tag:yaml.org,2002:python/unicode', _CustomSafeLoader.construct_python_unicode) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index a126aad3..2d9e86b4 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -247,7 +247,7 @@ def _quick_read_instance_id(dirs=None): try: data = util.pathprefix2dict(d, required=['meta-data']) md = util.load_yaml(data['meta-data']) - if iid_key in md: + if md and iid_key in md: return md[iid_key] except ValueError: pass diff --git a/cloudinit/templater.py b/cloudinit/templater.py index a00ade20..009bed32 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -36,14 +36,14 @@ from cloudinit import util LOG = logging.getLogger(__name__) TYPE_MATCHER = re.compile(r"##\s*template:(.*)", re.I) BASIC_MATCHER = 
re.compile(r'\$\{([A-Za-z0-9_.]+)\}|\$([A-Za-z0-9_.]+)') -MISSING_JINJA_PREFIX = u'CI_MISSING_JINJA_VAR/' +MISSING_JINJA_PREFIX = 'CI_MISSING_JINJA_VAR/' class UndefinedJinjaVariable(JUndefined): """Class used to represent any undefined jinja template variable.""" def __str__(self): - return u'%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name) + return '%s%s' % (MISSING_JINJA_PREFIX, self._undefined_name) def __sub__(self, other): other = str(other).replace(MISSING_JINJA_PREFIX, '') diff --git a/cloudinit/tests/test_subp.py b/cloudinit/tests/test_subp.py index 911c1f3d..515d5d64 100644 --- a/cloudinit/tests/test_subp.py +++ b/cloudinit/tests/test_subp.py @@ -91,8 +91,8 @@ class TestSubp(CiTestCase): tmp_file = self.tmp_path('test.out') cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True) - self.assertEqual(u'', out) - self.assertEqual(u'', _err) + self.assertEqual('', out) + self.assertEqual('', _err) self.assertEqual('HI MOM\n', util.load_file(tmp_file)) def test_subp_handles_strings(self): @@ -100,8 +100,8 @@ class TestSubp(CiTestCase): tmp_file = self.tmp_path('test.out') cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) (out, _err) = subp.subp(cmd, shell=True) - self.assertEqual(u'', out) - self.assertEqual(u'', _err) + self.assertEqual('', out) + self.assertEqual('', _err) self.assertEqual('HI MOM\n', util.load_file(tmp_file)) def test_subp_handles_utf8(self): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index a4296bf6..d7206c72 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -912,13 +912,13 @@ scbus-1 on xpt0 bus 0 'PreprovisionedVMType': None, 'PreprovisionedVm': False, 'datasource': {'Azure': {'agent_command': 'my_command'}}, - 'system_info': {'default_user': {'name': u'myuser'}}} + 'system_info': {'default_user': {'name': 'myuser'}}} expected_metadata = { 'azure_data': { 'configurationsettype': 'LinuxProvisioningConfiguration'}, 'imds': NETWORK_METADATA, 'instance-id': EXAMPLE_UUID, - 'local-hostname': u'myhost', + 'local-hostname': 'myhost', 'random_seed': 'wild'} crawled_metadata = dsrc.crawl_metadata() @@ -1385,7 +1385,7 @@ scbus-1 on xpt0 bus 0 def test_ovf_can_include_unicode(self): xml = construct_valid_ovf_env(data={}) - xml = u'\ufeff{0}'.format(xml) + xml = '\ufeff{0}'.format(xml) dsrc = self._get_ds({'ovfcontent': xml}) dsrc.get_data() diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 2e2b7847..51097231 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -15,7 +15,7 @@ from cloudinit import util from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir -PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' +PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' EC2_META = { 'ami-id': 'ami-00000001', 'ami-launch-index': 0, diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 478f3503..a9829c75 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -21,7 +21,7 @@ from cloudinit.sources.helpers import openstack from cloudinit import util BASE_URL = "http://169.254.169.254" -PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP 
ubuntu@server-460\n' +PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' EC2_META = { 'ami-id': 'ami-00000001', 'ami-launch-index': '0', diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py index 32f3274a..f9e968c5 100644 --- a/tests/unittests/test_datasource/test_scaleway.py +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -209,9 +209,9 @@ class TestDataSourceScaleway(HttprettyTestCase): self.assertEqual(self.datasource.get_instance_id(), MetadataResponses.FAKE_METADATA['id']) self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', ].sort()) self.assertEqual(self.datasource.get_hostname(), MetadataResponses.FAKE_METADATA['hostname']) @@ -242,8 +242,8 @@ class TestDataSourceScaleway(HttprettyTestCase): ] self.datasource.metadata['ssh_public_keys'] = [] self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', ].sort()) def test_ssh_keys_only_conf(self): @@ -260,9 +260,9 @@ class TestDataSourceScaleway(HttprettyTestCase): 'fingerprint': '2048 06:ff:... login2 (RSA)' }] self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', ].sort()) def test_ssh_keys_both(self): @@ -282,9 +282,9 @@ class TestDataSourceScaleway(HttprettyTestCase): 'fingerprint': '2048 06:ff:... login2 (RSA)' }] self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - u'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', ].sort()) @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index abb0a9b6..687cfbf1 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -26,7 +26,7 @@ from cloudinit.sources import DataSourceNone from cloudinit.tests import helpers as t_help -EXPECTEDKEY = u"""-----BEGIN PGP PUBLIC KEY BLOCK----- +EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ diff --git a/tests/unittests/test_handler/test_handler_debug.py b/tests/unittests/test_handler/test_handler_debug.py index 787ba350..7d43e020 100644 --- a/tests/unittests/test_handler/test_handler_debug.py +++ b/tests/unittests/test_handler/test_handler_debug.py @@ -41,7 +41,7 @@ class TestDebug(FilesystemMockingTestCase): m_locale.return_value = 'en_US.UTF-8' cfg = { 'abc': '123', - 'c': u'\u20a0', + 'c': '\u20a0', 'debug': { 'verbose': True, # Does not actually write here due to mocking... 
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 2290cab7..bc30c90b 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -604,7 +604,7 @@ class TestMultiLog(helpers.FilesystemMockingTestCase): class TestMessageFromString(helpers.TestCase): def test_unicode_not_messed_up(self): - roundtripped = util.message_from_string(u'\n').as_string() + roundtripped = util.message_from_string('\n').as_string() self.assertNotIn('\x00', roundtripped) diff --git a/tox.ini b/tox.ini index aad286ff..45ccadce 100644 --- a/tox.ini +++ b/tox.ini @@ -12,7 +12,7 @@ passenv= [testenv:flake8] basepython = python3 deps = - flake8==3.8.2 + flake8==3.9.2 commands = {envpython} -m flake8 {posargs:cloudinit/ tests/ tools/ setup.py} # https://github.com/gabrielfalcao/HTTPretty/issues/223 @@ -23,7 +23,7 @@ setenv = basepython = python3 deps = # requirements - pylint==2.9.3 + pylint==2.11.1 # test-requirements because unit tests are now present in cloudinit tree -r{toxinidir}/test-requirements.txt -r{toxinidir}/integration-requirements.txt -- cgit v1.2.3 From 24a15e9cd3acd7c93efe9755b98897ee8c0476c7 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 22 Sep 2021 16:30:12 -0500 Subject: docs: update cc_disk_setup for fs to raw disk (#1017) --- cloudinit/config/cc_disk_setup.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 3ec49ca5..440f05f1 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -50,15 +50,18 @@ filesystem on may be specified either as a path or as an alias in the format ``.`` where ```` denotes the partition number on the device. The partition can also be specified by setting ``partition`` to the desired partition number. The ``partition`` option may also be set to ``auto``, in -which this module will search for the existance of a filesystem matching the +which this module will search for the existence of a filesystem matching the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip creating the filesystem if one is found. The ``partition`` option may also be set to ``any``, in which case any file system that matches ``type`` and ``device`` will cause this module to skip filesystem creation for the ``fs_setup`` entry, regardless of ``label`` matching or not. To write a -filesystem directly to a device, use ``partition: none``. A label can be -specified for the filesystem using ``label``, and the filesystem type can be -specified using ``filesystem``. +filesystem directly to a device, use ``partition: none``. ``partition: none`` +will **always** write the filesystem, even when the ``label`` and +``filesystem`` are matched, and ``overwrite`` is ``false``. + +A label can be specified for the filesystem using +``label``, and the filesystem type can be specified using ``filesystem``. .. note:: If specifying device using the ``.`` format, -- cgit v1.2.3 From 244af3f4971c8f89c741aa90306f0fb1b4459940 Mon Sep 17 00:00:00 2001 From: eb3095 <45504889+eb3095@users.noreply.github.com> Date: Fri, 24 Sep 2021 09:57:33 -0400 Subject: Cleanup Vultr support (#987) Offload Vultr's vendordata assembly to the backend, correct vendordata storage and parsing, allow passing critical data via the user agent, and provide better networking configuration for additional interfaces. 
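As background for the user-agent change, the metadata request the helper now makes boils down to roughly the following sketch. The format string, header names, URL, timeout and retry counts are all taken from the diff below; the values in the comments are illustrative only:

    # Sketch: how the datasource now identifies the origin image when
    # fetching Vultr metadata, so the backend can tailor vendordata.
    from cloudinit import url_helper, util, version

    agent = 'Cloud-Init/%s - OS: %s Variant: %s' % (
        version.version_string(),        # e.g. '21.3'
        util.system_info()['system'],    # e.g. 'Linux'
        util.system_info()['variant'])   # e.g. 'ubuntu'
    headers = {'Metadata-Token': 'cloudinit', 'User-Agent': agent}
    resp = url_helper.readurl('http://169.254.169.254/v1.json',
                              headers=headers, timeout=2, retries=30,
                              sec_between=2)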
--- cloudinit/sources/DataSourceVultr.py | 47 +++++++++--------- cloudinit/sources/helpers/vultr.py | 68 +++++++-------------------- tests/unittests/test_datasource/test_vultr.py | 30 +++++------- 3 files changed, 50 insertions(+), 95 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py index c08ff848..92765c72 100644 --- a/cloudinit/sources/DataSourceVultr.py +++ b/cloudinit/sources/DataSourceVultr.py @@ -8,6 +8,7 @@ from cloudinit import log as log from cloudinit import sources from cloudinit import util +from cloudinit import version import cloudinit.sources.helpers.vultr as vultr @@ -16,7 +17,11 @@ BUILTIN_DS_CONFIG = { 'url': 'http://169.254.169.254', 'retries': 30, 'timeout': 2, - 'wait': 2 + 'wait': 2, + 'user-agent': 'Cloud-Init/%s - OS: %s Variant: %s' % + (version.version_string(), + util.system_info()['system'], + util.system_info()['variant']) } @@ -40,21 +45,18 @@ class DataSourceVultr(sources.DataSource): LOG.debug("Machine is a Vultr instance") # Fetch metadata - md = self.get_metadata() - - self.metadata_full = md - self.metadata['instanceid'] = md['instanceid'] - self.metadata['local-hostname'] = md['hostname'] - self.metadata['public-keys'] = md["public-keys"] - self.userdata_raw = md["user-data"] + self.metadata = self.get_metadata() + self.metadata['instance-id'] = self.metadata['instanceid'] + self.metadata['local-hostname'] = self.metadata['hostname'] + self.userdata_raw = self.metadata["user-data"] # Generate config and process data - self.get_datasource_data(md) + self.get_datasource_data(self.metadata) # Dump some data so diagnosing failures is manageable LOG.debug("Vultr Vendor Config:") - LOG.debug(md['vendor-data']['config']) - LOG.debug("SUBID: %s", self.metadata['instanceid']) + LOG.debug(util.json_dumps(self.metadata['vendor-data'])) + LOG.debug("SUBID: %s", self.metadata['instance-id']) LOG.debug("Hostname: %s", self.metadata['local-hostname']) if self.userdata_raw is not None: LOG.debug("User-Data:") @@ -64,14 +66,11 @@ class DataSourceVultr(sources.DataSource): # Process metadata def get_datasource_data(self, md): - # Grab config - config = md['vendor-data']['config'] - # Generate network config self.netcfg = vultr.generate_network_config(md['interfaces']) - # This requires info generated in the vendor config - user_scripts = vultr.generate_user_scripts(md, self.netcfg['config']) + # Grab vendordata + self.vendordata_raw = md['vendor-data'] # Default hostname is "guest" for whitelabel if self.metadata['local-hostname'] == "": @@ -81,18 +80,13 @@ class DataSourceVultr(sources.DataSource): if self.userdata_raw == "": self.userdata_raw = None - # Assemble vendor-data - # This adds provided scripts and the config - self.vendordata_raw = [] - self.vendordata_raw.extend(user_scripts) - self.vendordata_raw.append("#cloud-config\n%s" % config) - # Get the metadata by flag def get_metadata(self): return vultr.get_metadata(self.ds_cfg['url'], self.ds_cfg['timeout'], self.ds_cfg['retries'], - self.ds_cfg['wait']) + self.ds_cfg['wait'], + self.ds_cfg['user-agent']) # Compare subid as instance id def check_instance_id(self, sys_cfg): @@ -137,11 +131,12 @@ if __name__ == "__main__": md = vultr.get_metadata(BUILTIN_DS_CONFIG['url'], BUILTIN_DS_CONFIG['timeout'], BUILTIN_DS_CONFIG['retries'], - BUILTIN_DS_CONFIG['wait']) - config = md['vendor-data']['config'] + BUILTIN_DS_CONFIG['wait'], + BUILTIN_DS_CONFIG['user-agent']) + config = md['vendor-data'] sysinfo = vultr.get_sysinfo() 
print(util.json_dumps(sysinfo)) - print(config) + print(util.json_dumps(config)) # vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py index 2521ec2f..9effb0d9 100644 --- a/cloudinit/sources/helpers/vultr.py +++ b/cloudinit/sources/helpers/vultr.py @@ -17,20 +17,17 @@ LOG = log.getLogger(__name__) @lru_cache() -def get_metadata(url, timeout, retries, sec_between): +def get_metadata(url, timeout, retries, sec_between, agent): # Bring up interface try: with EphemeralDHCPv4(connectivity_url_data={"url": url}): # Fetch the metadata - v1 = read_metadata(url, timeout, retries, sec_between) + v1 = read_metadata(url, timeout, retries, sec_between, agent) except (NoDHCPLeaseError) as exc: LOG.error("Bailing, DHCP Exception: %s", exc) raise - v1_json = json.loads(v1) - metadata = v1_json - - return metadata + return json.loads(v1) # Read the system information from SMBIOS @@ -64,12 +61,20 @@ def is_vultr(): # Read Metadata endpoint -def read_metadata(url, timeout, retries, sec_between): +def read_metadata(url, timeout, retries, sec_between, agent): url = "%s/v1.json" % url + + # Announce os details so we can handle non Vultr origin + # images and provide correct vendordata generation. + headers = { + 'Metadata-Token': 'cloudinit', + 'User-Agent': agent + } + response = url_helper.readurl(url, timeout=timeout, retries=retries, - headers={'Metadata-Token': 'vultr'}, + headers=headers, sec_between=sec_between) if not response.ok(): @@ -114,9 +119,9 @@ def generate_network_config(interfaces): public = generate_public_network_interface(interfaces[0]) network['config'].append(public) - # Prepare interface 1, private - if len(interfaces) > 1: - private = generate_private_network_interface(interfaces[1]) + # Prepare additional interfaces, private + for i in range(1, len(interfaces)): + private = generate_private_network_interface(interfaces[i]) network['config'].append(private) return network @@ -141,7 +146,7 @@ def generate_public_network_interface(interface): "control": "auto" }, { - "type": "dhcp6", + "type": "ipv6_slaac", "control": "auto" }, ] @@ -187,7 +192,6 @@ def generate_private_network_interface(interface): "name": interface_name, "type": "physical", "mac_address": interface['mac'], - "accept-ra": 1, "subnets": [ { "type": "static", @@ -201,42 +205,4 @@ def generate_private_network_interface(interface): return netcfg -# This is for the vendor and startup scripts -def generate_user_scripts(md, network_config): - user_scripts = [] - - # Raid 1 script - if md['vendor-data']['raid1-script']: - user_scripts.append(md['vendor-data']['raid1-script']) - - # Enable multi-queue on linux - if util.is_Linux() and md['vendor-data']['ethtool-script']: - ethtool_script = md['vendor-data']['ethtool-script'] - - # Tool location - tool = "/opt/vultr/ethtool" - - # Go through the interfaces - for netcfg in network_config: - # If the interface has a mac and is physical - if "mac_address" in netcfg and netcfg['type'] == "physical": - # Set its multi-queue to num of cores as per RHEL Docs - name = netcfg['name'] - command = "%s -L %s combined $(nproc --all)" % (tool, name) - ethtool_script = '%s\n%s' % (ethtool_script, command) - - user_scripts.append(ethtool_script) - - # This is for vendor scripts - if md['vendor-data']['vendor-script']: - user_scripts.append(md['vendor-data']['vendor-script']) - - # Startup script - script = md['startup-script'] - if script and script != "echo No configured startup script": - user_scripts.append(script) - - return user_scripts - - 
# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_vultr.py b/tests/unittests/test_datasource/test_vultr.py index bbea2aa3..63235009 100644 --- a/tests/unittests/test_datasource/test_vultr.py +++ b/tests/unittests/test_datasource/test_vultr.py @@ -64,10 +64,8 @@ VULTR_V1_1 = { 'raid1-script': '', 'user-data': [ ], - 'vendor-data': { - 'vendor-script': '', - 'ethtool-script': '', - 'config': { + 'vendor-data': [ + { 'package_upgrade': 'true', 'disable_root': 0, 'ssh_pwauth': 1, @@ -83,7 +81,7 @@ } } } - } + ] } VULTR_V1_2 = { @@ -155,11 +153,8 @@ 'user-data': [ ], - 'vendor-data': { - 'vendor-script': '', - 'ethtool-script': '', - 'raid1-script': '', - 'config': { + 'vendor-data': [ + { 'package_upgrade': 'true', 'disable_root': 0, 'ssh_pwauth': 1, @@ -175,7 +170,7 @@ } } } - } + ] } SSH_KEYS_1 = [ @@ -217,7 +212,7 @@ EXPECTED_VULTR_NETWORK_1 = { 'accept-ra': 1, 'subnets': [ {'type': 'dhcp', 'control': 'auto'}, - {'type': 'dhcp6', 'control': 'auto'} + {'type': 'ipv6_slaac', 'control': 'auto'} ], } ] @@ -237,14 +232,13 @@ EXPECTED_VULTR_NETWORK_2 = { 'accept-ra': 1, 'subnets': [ {'type': 'dhcp', 'control': 'auto'}, - {'type': 'dhcp6', 'control': 'auto'} + {'type': 'ipv6_slaac', 'control': 'auto'} ], }, { 'name': 'eth1', 'type': 'physical', 'mac_address': '5a:00:03:1b:4e:ca', - 'accept-ra': 1, 'subnets': [ { "type": "static", @@ -270,12 +264,12 @@ class TestDataSourceVultr(CiTestCase): super(TestDataSourceVultr, self).setUp() # Stored as a dict to make it easier to maintain - raw1 = json.dumps(VULTR_V1_1['vendor-data']['config']) - raw2 = json.dumps(VULTR_V1_2['vendor-data']['config']) + raw1 = json.dumps(VULTR_V1_1['vendor-data'][0]) + raw2 = json.dumps(VULTR_V1_2['vendor-data'][0]) # Make expected format - VULTR_V1_1['vendor-data']['config'] = raw1 - VULTR_V1_2['vendor-data']['config'] = raw2 + VULTR_V1_1['vendor-data'] = [raw1] + VULTR_V1_2['vendor-data'] = [raw2] self.tmp = self.tmp_dir() -- cgit v1.2.3 From 2d67c1b9a4f2d2ad6642f253fc3a25331c334894 Mon Sep 17 00:00:00 2001 From: jshen28 Date: Thu, 30 Sep 2021 02:33:48 +0800 Subject: Use ascii code for growpart (#1036) growpart does not work well in environments using UTF-8 encoding. This patch forces the growpart command to use the C locale. 
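Conceptually the fix is tiny: copy the environment, pin the locale, and pass that environment to every growpart invocation. A minimal sketch (the disk and partition arguments are illustrative):

    # Sketch of the approach taken in the diff below: growpart's output
    # gets parsed, so pin the locale to keep it stable ASCII.
    import os
    from cloudinit import subp

    myenv = os.environ.copy()
    myenv['LANG'] = 'C'
    subp.subp(['growpart', '--dry-run', '/dev/sda', '1'], env=myenv)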
Root issue likely: https://bugs.launchpad.net/ubuntu/+source/cloud-utils/+bug/1928167 --- cloudinit/config/cc_growpart.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 9f5525a1..ce7ca78b 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -142,9 +142,11 @@ class ResizeGrowPart(object): return False def resize(self, diskdev, partnum, partdev): + myenv = os.environ.copy() + myenv['LANG'] = 'C' before = get_size(partdev) try: - subp.subp(["growpart", '--dry-run', diskdev, partnum]) + subp.subp(["growpart", '--dry-run', diskdev, partnum], env=myenv) except subp.ProcessExecutionError as e: if e.exit_code != 1: util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", @@ -153,7 +155,7 @@ class ResizeGrowPart(object): return (before, before) try: - subp.subp(["growpart", diskdev, partnum]) + subp.subp(["growpart", diskdev, partnum], env=myenv) except subp.ProcessExecutionError as e: util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum) raise ResizeFailedException(e) from e -- cgit v1.2.3 From 82d6f5644f3a755e17e7ea22298bdd04e56c5ea0 Mon Sep 17 00:00:00 2001 From: Nicolas Bock Date: Wed, 29 Sep 2021 13:42:03 -0600 Subject: Make wording for module frequency consistent (#1039) Some modules' frequency are documented as `always` while others as `per always`. The difference in wording can be confusing. This change updates all such modules to use `always`. Signed-off-by: Nicolas Bock --- cloudinit/config/cc_disable_ec2_metadata.py | 2 +- cloudinit/config/cc_emit_upstart.py | 2 +- cloudinit/config/cc_final_message.py | 2 +- cloudinit/config/cc_growpart.py | 2 +- cloudinit/config/cc_migrator.py | 2 +- cloudinit/config/cc_refresh_rmc_and_interface.py | 2 +- cloudinit/config/cc_scripts_per_boot.py | 2 +- cloudinit/config/cc_set_hostname.py | 2 +- cloudinit/config/cc_update_etc_hosts.py | 2 +- cloudinit/config/cc_update_hostname.py | 2 +- cloudinit/config/cc_yum_add_repo.py | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index dff93245..61c769b3 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -17,7 +17,7 @@ by default. **Internal name:** ``cc_disable_ec2_metadata`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** all diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index b1d99f97..40eee052 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -16,7 +16,7 @@ user configuration should be required. 
**Internal name:** ``cc_emit_upstart`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** ubuntu, debian """ diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index 3441f7a9..4fa5297e 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -21,7 +21,7 @@ specified as a jinja template with the following variables set: **Internal name:** ``cc_final_message`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** all diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index ce7ca78b..2d01175f 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -50,7 +50,7 @@ growpart is:: **Internal name:** ``cc_growpart`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** all diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 3995704a..79bcc27d 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -17,7 +17,7 @@ false`` in config. **Internal name:** ``cc_migrator`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** all diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py index 146758ad..d5e0ecb2 100644 --- a/cloudinit/config/cc_refresh_rmc_and_interface.py +++ b/cloudinit/config/cc_refresh_rmc_and_interface.py @@ -28,7 +28,7 @@ This module handles **Internal name:** ``cc_refresh_rmc_and_interface`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** RHEL diff --git a/cloudinit/config/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py index 1e3f419e..1bf3f508 100644 --- a/cloudinit/config/cc_scripts_per_boot.py +++ b/cloudinit/config/cc_scripts_per_boot.py @@ -17,7 +17,7 @@ module does not accept any config keys. **Internal name:** ``cc_scripts_per_boot`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** all """ diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 5a59dc32..a96bcc18 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -34,7 +34,7 @@ based on initial hostname. **Internal name:** ``cc_set_hostname`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** all diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 3a78fccc..32368bbb 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -39,7 +39,7 @@ ping ``127.0.0.1`` or ``127.0.1.1`` or other ip). **Internal name:** ``cc_update_etc_hosts`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** all diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index f4120356..370de73a 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -20,7 +20,7 @@ is set, then the hostname will not be altered. 
**Internal name:** ``cc_update_hostname`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** all diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index bcca86cb..d66d3ae4 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -16,7 +16,7 @@ entry, the config entry will be skipped. **Internal name:** ``cc_yum_add_repo`` -**Module frequency:** per always +**Module frequency:** always **Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora, openEuler, photon, rhel, rocky, virtuozzo -- cgit v1.2.3 From 591e97dad5cf5a6ea8211f34c7d8135aaaf947f6 Mon Sep 17 00:00:00 2001 From: eb3095 <45504889+eb3095@users.noreply.github.com> Date: Mon, 4 Oct 2021 10:23:12 -0400 Subject: Allow Vultr to set MTU and use as-is configs (#1037) Add MTU, accept-ra, routes, options and a direct way to provide intact cloud configs for networking, as opposed to relying on configurations that may need to be changed often. --- cloudinit/sources/DataSourceVultr.py | 7 +++++- cloudinit/sources/helpers/vultr.py | 41 ++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py index 92765c72..68e1ff0b 100644 --- a/cloudinit/sources/DataSourceVultr.py +++ b/cloudinit/sources/DataSourceVultr.py @@ -67,7 +67,12 @@ class DataSourceVultr(sources.DataSource): # Process metadata def get_datasource_data(self, md): # Generate network config - self.netcfg = vultr.generate_network_config(md['interfaces']) + if "cloud_interfaces" in md: + # In the future we will just drop pre-configured + # network configs into the array. They need names though. + self.netcfg = vultr.add_interface_names(md['cloud_interfaces']) + else: + self.netcfg = vultr.generate_network_config(md['interfaces']) # Grab vendordata self.vendordata_raw = md['vendor-data'] diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py index 9effb0d9..55487ac3 100644 --- a/cloudinit/sources/helpers/vultr.py +++ b/cloudinit/sources/helpers/vultr.py @@ -152,6 +152,16 @@ def generate_public_network_interface(interface): ] } + # Options that may or may not be used + if "mtu" in interface: + netcfg['mtu'] = interface['mtu'] + + if "accept-ra" in interface: + netcfg['accept-ra'] = interface['accept-ra'] + + if "routes" in interface: + netcfg['subnets'][0]['routes'] = interface['routes'] + # Check for additional IP's additional_count = len(interface['ipv4']['additional']) if "ipv4" in interface and additional_count > 0: @@ -162,6 +172,10 @@ def generate_public_network_interface(interface): "address": additional['address'], "netmask": additional['netmask'] } + + if "routes" in additional: + add['routes'] = additional['routes'] + netcfg['subnets'].append(add) # Check for additional IPv6's @@ -174,6 +188,10 @@ def generate_public_network_interface(interface): "address": additional['address'], "netmask": additional['netmask'] } + + if "routes" in additional: + add['routes'] = additional['routes'] + netcfg['subnets'].append(add) # Add config to template @@ -202,7 +220,30 @@ def generate_private_network_interface(interface): ] } + # Options that may or may not be used + if "mtu" in interface: + netcfg['mtu'] = interface['mtu'] + + if "accept-ra" in interface: + netcfg['accept-ra'] = interface['accept-ra'] + + if "routes" in interface: + netcfg['subnets'][0]['routes'] = interface['routes'] + + return netcfg +# Make 
required adjustments to the network configs provided +def add_interface_names(interfaces): + for interface in interfaces: + interface_name = get_interface_name(interface['mac']) + if not interface_name: + raise RuntimeError( + "Interface: %s could not be found on the system" % + interface['mac']) + interface['name'] = interface_name + + return interfaces + + # vi: ts=4 expandtab -- cgit v1.2.3 From 1bbb67ca200e53d98d7f14904b986240a2fca4b5 Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Thu, 7 Oct 2021 16:01:59 +0200 Subject: Support ETHx_IP6_GATEWAY, SET_HOSTNAME on OpenNebula (#1045) OpenNebula 6.1.80 (current dev. version) is introducing a new IPv6 gateway contextualization variable ETHx_IP6_GATEWAY, which mimics the existing variable ETHx_GATEWAY6. The ETHx_GATEWAY6 used until now will be deprecated in a future release (ETA spring 2022). See: - new variable - https://github.com/OpenNebula/one/commit/e4d2cc11b9f3c6d01b53774b831f48d9d089c1cc - deprecation tracking issue - https://github.com/OpenNebula/one/issues/5536 Also, added support for the SET_HOSTNAME context variable, which is currently a widely used variable to configure the guest VM hostname. See https://docs.opennebula.io/6.0/management_and_operations/references/template.html#context-section --- cloudinit/sources/DataSourceOpenNebula.py | 8 ++++++-- doc/rtd/topics/datasources/opennebula.rst | 8 ++++++++ tests/unittests/test_datasource/test_opennebula.py | 12 +++++++----- 3 files changed, 21 insertions(+), 7 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 730ec586..21603fbd 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -195,7 +195,11 @@ class OpenNebulaNetwork(object): return self.get_field(dev, "gateway") def get_gateway6(self, dev): - return self.get_field(dev, "gateway6") + # OpenNebula 6.1.80 introduced new context parameter ETHx_IP6_GATEWAY + # to replace old ETHx_GATEWAY6. Old ETHx_GATEWAY6 will be removed in + # OpenNebula 6.4.0 (https://github.com/OpenNebula/one/issues/5536). + return self.get_field(dev, "ip6_gateway", + self.get_field(dev, "gateway6")) def get_mask(self, dev): return self.get_field(dev, "mask", "255.255.255.0") @@ -440,7 +444,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None): # custom hostname -- try hostname or leave cloud-init # itself create hostname from IP address later - for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): + for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): if k in context: results['metadata']['local-hostname'] = context[k] break diff --git a/doc/rtd/topics/datasources/opennebula.rst b/doc/rtd/topics/datasources/opennebula.rst index 350a3e93..9daa0462 100644 --- a/doc/rtd/topics/datasources/opennebula.rst +++ b/doc/rtd/topics/datasources/opennebula.rst @@ -69,13 +69,21 @@ Datasource mode configuration override. Values: local, net, disabled. ETH_NETWORK ETH_MASK ETH_GATEWAY + ETH_GATEWAY6 ETH_DOMAIN ETH_DNS + ETH_SEARCH_DOMAIN + ETH_MTU + ETH_IP6 + ETH_IP6_ULA + ETH_IP6_PREFIX_LENGTH + ETH_IP6_GATEWAY Static `network configuration`_. :: + SET_HOSTNAME HOSTNAME Instance hostname. 
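For reference, a context section exercising the new variables might look like the following sketch (shell-style key=value pairs as found in context.sh on the contextualization CD; the variable names come from this patch, the hostname and addresses are made up):

    SET_HOSTNAME='myvm'
    ETH0_IP6='2001:db8::10'
    ETH0_IP6_GATEWAY='2001:db8::1'  # new name introduced in 6.1.80
    ETH0_GATEWAY6='2001:db8::1'     # legacy name, still honored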
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 9c6070a5..283b65c2 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -211,7 +211,8 @@ class TestOpenNebulaDataSource(CiTestCase): def test_hostname(self, m_get_phys_by_mac): for dev in ('eth0', 'ens3'): m_get_phys_by_mac.return_value = {MACADDR: dev} - for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'): + for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', + 'ETH0_IP'): my_d = os.path.join(self.tmp, k) populate_context_dir(my_d, {k: PUBLIC_IP}) results = ds.read_context_disk_dir(my_d, mock.Mock()) @@ -488,10 +489,11 @@ class TestOpenNebulaNetwork(unittest.TestCase): Verify get_gateway6('device') correctly returns IPv6 default gateway address. """ - context = {'ETH0_GATEWAY6': IP6_GW} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_gateway6('eth0') - self.assertEqual(IP6_GW, val) + for k in ('GATEWAY6', 'IP6_GATEWAY'): + context = {'ETH0_' + k: IP6_GW} + net = ds.OpenNebulaNetwork(context, mock.Mock()) + val = net.get_gateway6('eth0') + self.assertEqual(IP6_GW, val) def test_get_mask(self): """ -- cgit v1.2.3 From 3d2bac8bf6d0c53f56e14cb2c15b0c695cf0a647 Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Thu, 7 Oct 2021 18:07:43 +0200 Subject: renderer: convert relative imports to absolute (#1052) Fixes the following pylint error: cloudinit/net/renderer.py:12: [E0611(no-name-in-module), ] No name 'generate_udev_rule' in module 'udev' Likely a false positive, but we don't really need to keep the imports relative, so let's convert them to absolute as a workaround. --- cloudinit/net/renderer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py index 27447bc2..54a83b51 100644 --- a/cloudinit/net/renderer.py +++ b/cloudinit/net/renderer.py @@ -8,8 +8,8 @@ import abc import io -from .network_state import parse_net_config_data -from .udev import generate_udev_rule +from cloudinit.net.network_state import parse_net_config_data +from cloudinit.net.udev import generate_udev_rule def filter_by_type(match_type): -- cgit v1.2.3 From 9c147e8341e287366790e60658f646cdcc59bef2 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Thu, 7 Oct 2021 11:27:36 -0500 Subject: Allow disabling of network activation (SC-307) (#1048) In #919 (81299de), we refactored some of the code used to bring up networks across distros. Previously, the call to bring up network interfaces during 'init' stage unintentionally resulted in a no-op such that network interfaces were NEVER brought up by cloud-init, even if new network interfaces were found after crawling the metadata. The code was altered to bring up these discovered network interfaces. On ubuntu, this results in a 'netplan apply' call during 'init' stage for any ubuntu-based distro on a datasource that has a NETWORK dependency. On GCE, this additional 'netplan apply' conflicts with the google-guest-agent service, resulting in an instance that cannot be connected to. This commit adds a 'disable_network_activation' option that can be enabled in /etc/cloud/cloud.cfg to disable the activation of network interfaces in 'init' stage. 
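Concretely, opting out is a one-line drop-in; the path and content below are the same ones the new integration test writes:

    # /etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg
    disable_network_activation: true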
LP: #1938299 --- cloudinit/cmd/main.py | 11 +++++- cloudinit/cmd/tests/test_main.py | 23 ++++++++++++ cloudinit/distros/__init__.py | 3 ++ doc/rtd/topics/network-config.rst | 11 ++++++ .../datasources/test_network_dependency.py | 43 ++++++++++++++++++++++ 5 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 tests/integration_tests/datasources/test_network_dependency.py (limited to 'cloudinit') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 1de1de99..63186d34 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -239,6 +239,12 @@ def purge_cache_on_python_version_change(init): util.write_file(python_version_path, current_python_version) +def _should_bring_up_interfaces(init, args): + if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'): + return False + return not args.local + + def main_init(name, args): deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] if args.local: @@ -348,6 +354,7 @@ def main_init(name, args): util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net")) # Stage 5 + bring_up_interfaces = _should_bring_up_interfaces(init, args) try: init.fetch(existing=existing) # if in network mode, and the datasource is local @@ -367,7 +374,7 @@ def main_init(name, args): util.logexc(LOG, ("No instance datasource found!" " Likely bad things to come!")) if not args.force: - init.apply_network_config(bring_up=not args.local) + init.apply_network_config(bring_up=bring_up_interfaces) LOG.debug("[%s] Exiting without datasource", mode) if mode == sources.DSMODE_LOCAL: return (None, []) @@ -388,7 +395,7 @@ def main_init(name, args): # dhcp clients to advertize this hostname to any DDNS services # LP: #1746455. _maybe_set_hostname(init, stage='local', retry_stage='network') - init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL)) + init.apply_network_config(bring_up=bring_up_interfaces) if mode == sources.DSMODE_LOCAL: if init.datasource.dsmode != mode: diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index 1f5975b0..2e380848 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -4,6 +4,9 @@ from collections import namedtuple import copy import os from io import StringIO +from unittest import mock + +import pytest from cloudinit.cmd import main from cloudinit import safeyaml @@ -162,4 +165,24 @@ class TestMain(FilesystemMockingTestCase): for log in expected_logs: self.assertIn(log, self.stderr.getvalue()) + +class TestShouldBringUpInterfaces: + @pytest.mark.parametrize('cfg_disable,args_local,expected', [ + (True, True, False), + (True, False, False), + (False, True, False), + (False, False, True), + ]) + def test_should_bring_up_interfaces( + self, cfg_disable, args_local, expected + ): + init = mock.Mock() + init.cfg = {'disable_network_activation': cfg_disable} + + args = mock.Mock() + args.local = args_local + + result = main._should_bring_up_interfaces(init, args) + assert result == expected + # vi: ts=4 expandtab diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 63e78591..426a2cf4 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -227,8 +227,11 @@ class Distro(persistence.CloudInitPickleMixin, metaclass=abc.ABCMeta): # Now try to bring them up if bring_up: + LOG.debug('Bringing up newly configured network interfaces') network_activator = activators.select_activator() network_activator.bring_up_all_interfaces(network_state) + else: + LOG.debug("Not bringing up newly 
configured network interfaces") return False def apply_network_config_names(self, netconfig): diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 8eb7a31b..494b687a 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -75,6 +75,17 @@ If `Cloud-init`_ 's networking config has not been disabled, and no other network information is found, then it will proceed to generate a fallback networking configuration. +Disabling Network Activation +---------------------------- + +Some datasources may not be initialized until after network has been brought +up. In this case, cloud-init will attempt to bring up the interfaces specified +by the datasource metadata. + +This behavior can be disabled in the cloud-init configuration dictionary, +merged from ``/etc/cloud/cloud.cfg`` and ``/etc/cloud/cloud.cfg.d/*``:: + + disable_network_activation: true Fallback Network Configuration ============================== diff --git a/tests/integration_tests/datasources/test_network_dependency.py b/tests/integration_tests/datasources/test_network_dependency.py new file mode 100644 index 00000000..2e5e3121 --- /dev/null +++ b/tests/integration_tests/datasources/test_network_dependency.py @@ -0,0 +1,43 @@ +import pytest + +from tests.integration_tests.clouds import IntegrationCloud +from tests.integration_tests.conftest import get_validated_source + + +def _setup_custom_image(session_cloud: IntegrationCloud): + """Like `setup_image` in conftest.py, but with customized content.""" + source = get_validated_source(session_cloud) + if not source.installs_new_version(): + return + client = session_cloud.launch() + + # Insert our "disable_network_activation" file here + client.write_to_file( + '/etc/cloud/cloud.cfg.d/99-disable-network-activation.cfg', + 'disable_network_activation: true\n', + ) + + client.install_new_cloud_init(source) + # Even if we're keeping instances, we don't want to keep this + # one around as it was just for image creation + client.destroy() + + +# This test should be able to work on any cloud whose datasource specifies +# a NETWORK dependency +@pytest.mark.gce +@pytest.mark.ubuntu # Because netplan +def test_network_activation_disabled(session_cloud: IntegrationCloud): + """Test that the network is not activated during init mode.""" + _setup_custom_image(session_cloud) + with session_cloud.launch() as client: + result = client.execute('systemctl status google-guest-agent.service') + if not result.ok: + raise AssertionError('google-guest-agent is not active:\n%s', + result.stdout) + log = client.read_from_file('/var/log/cloud-init.log') + + assert "Running command ['netplan', 'apply']" not in log + + assert 'Not bringing up newly configured network interfaces' in log + assert 'Bringing up newly configured network interfaces' not in log -- cgit v1.2.3 From 725a7f7f19eb39b472e1f24b447fc9a596bf1748 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Thu, 7 Oct 2021 14:08:13 -0600 Subject: Allow comments in runcmd and report failed commands correctly (#1049) Allow comments in runcmd and report failed commands correctly A `runcmd` script may fail to parse properly, but does not mark `runcmd` as failed when that occurs. Additionally `shellify()` fails to correctly parse scripts that contain a comment line. Rectify both issues and add unit tests to verify correct behavior. 
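To make the failure mode concrete: in user-data such as the following sketch, the comment-only item parses to YAML null (None), which shellify() previously rejected and now skips (the echo commands are illustrative):

    #cloud-config
    runcmd:
      - echo "begin"
      - # a comment-only entry parses as None and is now tolerated
      - [ sh, -c, 'echo "end"' ]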
LP: #1853146 --- cloudinit/config/cc_runcmd.py | 7 +++--- cloudinit/tests/test_util.py | 5 ++++ cloudinit/util.py | 3 +++ .../unittests/test_handler/test_handler_runcmd.py | 27 +++++++++++++++++----- tools/.github-cla-signers | 1 + 5 files changed, 34 insertions(+), 9 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 1f75d6c5..15960c7d 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -65,7 +65,8 @@ schema = { 'items': { 'oneOf': [ {'type': 'array', 'items': {'type': 'string'}}, - {'type': 'string'}] + {'type': 'string'}, + {'type': 'null'}] }, 'additionalItems': False, # Reject items of non-string non-list 'additionalProperties': False, @@ -90,7 +91,7 @@ def handle(name, cfg, cloud, log, _args): try: content = util.shellify(cmd) util.write_file(out_fn, content, 0o700) - except Exception: - util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn) + except Exception as e: + raise type(e)('Failed to shellify {} into file {}'.format(cmd, out_fn)) # vi: ts=4 expandtab diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index 977ad8e0..ab5eb35c 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -349,6 +349,11 @@ class TestShellify(CiTestCase): util.shellify(["echo hi mom", ["echo", "hi dad"], ('echo', 'hi', 'sis')])) + def test_supports_comments(self): + self.assertEqual( + '\n'.join(["#!/bin/sh", "echo start", "echo end", ""]), + util.shellify(["echo start", None, "echo end"])) + class TestGetHostnameFqdn(CiTestCase): diff --git a/cloudinit/util.py b/cloudinit/util.py index 22d8917e..1b4384e1 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2031,6 +2031,9 @@ def shellify(cmdlist, add_header=True): elif isinstance(args, str): content = "%s%s\n" % (content, args) cmds_made += 1 + # Yaml parsing of a comment results in None + elif args is None: + pass else: raise TypeError( "Unable to shellify type '%s'. Expected list, string, tuple. 
" diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py index 73237d68..c03efa67 100644 --- a/tests/unittests/test_handler/test_handler_runcmd.py +++ b/tests/unittests/test_handler/test_handler_runcmd.py @@ -7,6 +7,7 @@ from cloudinit.tests.helpers import ( CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin, skipUnlessJsonSchema) +from unittest.mock import patch import logging import os import stat @@ -41,15 +42,27 @@ class TestRuncmd(FilesystemMockingTestCase): "Skipping module named notimportant, no 'runcmd' key", self.logs.getvalue()) + @patch('cloudinit.util.shellify') + def test_runcmd_shellify_fails(self, cls): + """When shellify fails throw exception""" + cls.side_effect = TypeError("patched shellify") + valid_config = {'runcmd': ['echo 42']} + cc = self._get_cloud('ubuntu') + with self.assertRaises(TypeError) as cm: + with self.allow_subp(['/bin/sh']): + handle('cc_runcmd', valid_config, cc, LOG, None) + self.assertIn("Failed to shellify", str(cm.exception)) + def test_handler_invalid_command_set(self): """Commands which can't be converted to shell will raise errors.""" invalid_config = {'runcmd': 1} cc = self._get_cloud('ubuntu') - handle('cc_runcmd', invalid_config, cc, LOG, []) + with self.assertRaises(TypeError) as cm: + handle('cc_runcmd', invalid_config, cc, LOG, []) self.assertIn( 'Failed to shellify 1 into file' ' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd', - self.logs.getvalue()) + str(cm.exception)) @skipUnlessJsonSchema() def test_handler_schema_validation_warns_non_array_type(self): @@ -60,11 +73,12 @@ class TestRuncmd(FilesystemMockingTestCase): """ invalid_config = {'runcmd': 1} cc = self._get_cloud('ubuntu') - handle('cc_runcmd', invalid_config, cc, LOG, []) + with self.assertRaises(TypeError) as cm: + handle('cc_runcmd', invalid_config, cc, LOG, []) self.assertIn( 'Invalid config:\nruncmd: 1 is not of type \'array\'', self.logs.getvalue()) - self.assertIn('Failed to shellify', self.logs.getvalue()) + self.assertIn('Failed to shellify', str(cm.exception)) @skipUnlessJsonSchema() def test_handler_schema_validation_warns_non_array_item_type(self): @@ -76,7 +90,8 @@ class TestRuncmd(FilesystemMockingTestCase): invalid_config = { 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} cc = self._get_cloud('ubuntu') - handle('cc_runcmd', invalid_config, cc, LOG, []) + with self.assertRaises(TypeError) as cm: + handle('cc_runcmd', invalid_config, cc, LOG, []) expected_warnings = [ 'runcmd.1: 20 is not valid under any of the given schemas', 'runcmd.3: {\'a\': \'n\'} is not valid under any of the given' @@ -85,7 +100,7 @@ class TestRuncmd(FilesystemMockingTestCase): logs = self.logs.getvalue() for warning in expected_warnings: self.assertIn(warning, logs) - self.assertIn('Failed to shellify', logs) + self.assertIn('Failed to shellify', str(cm.exception)) def test_handler_write_valid_runcmd_schema_to_file(self): """Valid runcmd schema is written to a runcmd shell script.""" diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 3e16ddf3..0aa168d6 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -27,6 +27,7 @@ esposem GabrielNagy giggsoff hamalq +holmanb impl irishgordo izzyleung -- cgit v1.2.3 From 1ee2f3c3f96e5639a00aeea87100d7fb3681c76f Mon Sep 17 00:00:00 2001 From: jshen28 Date: Fri, 8 Oct 2021 22:55:51 +0800 Subject: Use specified tmp location for growpart (#1046) Growpart uses mktemp internally to save some date. 
This could lead to conflicts with the tmpfile clean service during boot. This patch explicitly makes it use a tmp dir under /var/tmp. Signed-off-by: ushen --- cloudinit/config/cc_growpart.py | 36 +++++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 13 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 2d01175f..1ddc9dc7 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -72,6 +72,7 @@ import stat from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import subp +from cloudinit import temp_utils from cloudinit import util frequency = PER_ALWAYS @@ -145,20 +146,29 @@ class ResizeGrowPart(object): myenv = os.environ.copy() myenv['LANG'] = 'C' before = get_size(partdev) - try: - subp.subp(["growpart", '--dry-run', diskdev, partnum], env=myenv) - except subp.ProcessExecutionError as e: - if e.exit_code != 1: - util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", - diskdev, partnum) - raise ResizeFailedException(e) from e - return (before, before) - try: - subp.subp(["growpart", diskdev, partnum], env=myenv) - except subp.ProcessExecutionError as e: - util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum) - raise ResizeFailedException(e) from e + # growpart uses tmp dir to store intermediate states + # and may conflict with systemd-tmpfiles-clean + with temp_utils.tempdir(needs_exe=True) as tmpd: + growpart_tmp = os.path.join(tmpd, "growpart") + if not os.path.exists(growpart_tmp): + os.mkdir(growpart_tmp, 0o700) + myenv['TMPDIR'] = growpart_tmp + try: + subp.subp(["growpart", '--dry-run', diskdev, partnum], + env=myenv) + except subp.ProcessExecutionError as e: + if e.exit_code != 1: + util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", + diskdev, partnum) + raise ResizeFailedException(e) from e + return (before, before) + + try: + subp.subp(["growpart", diskdev, partnum], env=myenv) + except subp.ProcessExecutionError as e: + util.logexc(LOG, "Failed: growpart %s %s", diskdev, partnum) + raise ResizeFailedException(e) from e return (before, get_size(partdev)) -- cgit v1.2.3 From ca0da042e73d1cf078435befa02a6e09d004b62d Mon Sep 17 00:00:00 2001 From: Andrew Kutz <101085+akutz@users.noreply.github.com> Date: Fri, 8 Oct 2021 12:49:58 -0500 Subject: Fix set-name/interface DNS bug (#1058) This patch addresses an issue caused when the v2 network config directive "set-name" was used in conjunction with interface-specific DNS settings. The patch adds a test to validate the fix. For more information please see bug 1946493 as well as the issue https://github.com/kubernetes-sigs/image-builder/issues/712.
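For illustration, the v2 config shape exercised by the new test looks like:

network:
  version: 2
  ethernets:
    eth1:
      match:
        macaddress: '66:77:88:99:00:11'
      set-name: ens92
      nameservers:
        search: [foo.local, bar.local]
        addresses: [4.4.4.4]

Without the fix, _v2_common() records the nameserver settings against the config key ('eth1') while the renamed interface is tracked under its set-name ('ens92'), so the interface-specific DNS entries are dropped from the rendered configuration. The patch below keys the DNS settings on the set-name value whenever one is present.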
LP: #1946493 --- cloudinit/net/network_state.py | 4 ++++ cloudinit/net/tests/test_network_state.py | 1 + 2 files changed, 5 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 95b064f0..4862bf91 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -710,6 +710,10 @@ class NetworkStateInterpreter(metaclass=CommandHandlerMeta): def _v2_common(self, cfg): LOG.debug('v2_common: handling config:\n%s', cfg) for iface, dev_cfg in cfg.items(): + if 'set-name' in dev_cfg: + set_name_iface = dev_cfg.get('set-name') + if set_name_iface: + iface = set_name_iface if 'nameservers' in dev_cfg: search = dev_cfg.get('nameservers').get('search', []) dns = dev_cfg.get('nameservers').get('addresses', []) diff --git a/cloudinit/net/tests/test_network_state.py b/cloudinit/net/tests/test_network_state.py index 84e8308a..45e99171 100644 --- a/cloudinit/net/tests/test_network_state.py +++ b/cloudinit/net/tests/test_network_state.py @@ -52,6 +52,7 @@ network: eth1: match: macaddress: '66:77:88:99:00:11' + set-name: "ens92" nameservers: search: [foo.local, bar.local] addresses: [4.4.4.4] -- cgit v1.2.3 From 76166caff42b82aa55c6bcd9528f2c1e3575232a Mon Sep 17 00:00:00 2001 From: xiaofengw-vmware <42736879+xiaofengw-vmware@users.noreply.github.com> Date: Tue, 12 Oct 2021 11:54:31 +0800 Subject: VMWARE: search the deployPkg plugin in multiarch dir (#1061) Due to multiarch, the libdeployPkgPlugin.so is deployed into dir /usr/lib/<multiarch>/open-vm-tools, so we need to add this path into search_paths. LP: #1944946 --- cloudinit/sources/DataSourceOVF.py | 4 +++- tests/unittests/test_ds_identify.py | 24 ++++++++++++++++++++++ tools/ds-identify | 5 +++++ 3 files changed, 32 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 3e436dfa..08a205f1 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -115,7 +115,9 @@ class DataSourceOVF(sources.DataSource): else: search_paths = ( "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", - "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") + "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools", + "/usr/lib/x86_64-linux-gnu/open-vm-tools", + "/usr/lib/aarch64-linux-gnu/open-vm-tools") plugin = "libdeployPkgPlugin.so" deployPkgPluginPath = None diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 8617d7bd..43603ea5 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -534,6 +534,30 @@ class TestDsIdentify(DsIdentifyBase): return self._check_via_dict( cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE]) + def test_ovf_on_vmware_iso_found_open_vm_tools_x86_64_linux_gnu(self): + """OVF is identified when open-vm-tools installed in + /usr/lib/x86_64-linux-gnu.""" + cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization']) + p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so' + x86 = 'usr/lib/x86_64-linux-gnu/open-vm-tools/plugins/vmsvc/' \ 'libdeployPkgPlugin.so' + cust64['files'][x86] = cust64['files'][p32] + del cust64['files'][p32] + return self._check_via_dict( + cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE]) + + def test_ovf_on_vmware_iso_found_open_vm_tools_aarch64_linux_gnu(self): + """OVF is identified when open-vm-tools installed in + /usr/lib/aarch64-linux-gnu.""" + cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization']) + p32 = 
'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so' + aarch64 = 'usr/lib/aarch64-linux-gnu/open-vm-tools/plugins/vmsvc/' \ + 'libdeployPkgPlugin.so' + cust64['files'][aarch64] = cust64['files'][p32] + del cust64['files'][p32] + return self._check_via_dict( + cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE]) + def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self): """OVF is identified by well-known iso9660 labels.""" ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF']) diff --git a/tools/ds-identify b/tools/ds-identify index 63d2f0c8..c2f710e9 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -899,11 +899,16 @@ ovf_vmware_guest_customization() { # we have to have the plugin to do vmware customization local found="" pkg="" pre="${PATH_ROOT}/usr/lib" + local x86="x86_64-linux-gnu" aarch="aarch64-linux-gnu" local ppath="plugins/vmsvc/libdeployPkgPlugin.so" for pkg in vmware-tools open-vm-tools; do if [ -f "$pre/$pkg/$ppath" -o -f "${pre}64/$pkg/$ppath" ]; then found="$pkg"; break; fi + # search in multiarch dir + if [ -f "$pre/$x86/$pkg/$ppath" -o -f "$pre/$aarch/$pkg/$ppath" ]; then + found="$pkg"; break; + fi done [ -n "$found" ] || return 1 # vmware customization is disabled by default -- cgit v1.2.3 From a9501251aadf6d30192f7bd7debeabc9c3e29420 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Fri, 15 Oct 2021 19:53:42 -0500 Subject: testing: add get_cloud function (SC-461) (#1038) Also added supporting distro/datasource classes and updated tests that have a `get_cloud` call. --- cloudinit/config/tests/test_resolv_conf.py | 4 +- tests/unittests/test_distros/test_create_users.py | 43 +------ .../test_handler_apt_configure_sources_list_v1.py | 19 +-- .../test_handler_apt_configure_sources_list_v3.py | 60 ++++----- .../test_handler/test_handler_apt_source_v3.py | 24 +--- .../unittests/test_handler/test_handler_bootcmd.py | 29 ++--- .../test_handler/test_handler_ca_certs.py | 33 ++--- tests/unittests/test_handler/test_handler_chef.py | 25 ++-- tests/unittests/test_handler/test_handler_debug.py | 29 +---- .../test_handler/test_handler_landscape.py | 28 ++-- .../unittests/test_handler/test_handler_locale.py | 41 ++---- tests/unittests/test_handler/test_handler_lxd.py | 21 +-- .../test_handler/test_handler_mcollective.py | 23 +--- tests/unittests/test_handler/test_handler_ntp.py | 29 ++--- .../unittests/test_handler/test_handler_puppet.py | 111 ++++++++-------- .../unittests/test_handler/test_handler_runcmd.py | 33 ++--- .../test_handler/test_handler_seed_random.py | 46 +++---- .../test_handler/test_handler_timezone.py | 24 +--- tests/unittests/util.py | 143 +++++++++++++++++++++ 19 files changed, 354 insertions(+), 411 deletions(-) create mode 100644 tests/unittests/util.py (limited to 'cloudinit') diff --git a/cloudinit/config/tests/test_resolv_conf.py b/cloudinit/config/tests/test_resolv_conf.py index 45a06c22..aff110e5 100644 --- a/cloudinit/config/tests/test_resolv_conf.py +++ b/cloudinit/config/tests/test_resolv_conf.py @@ -2,7 +2,7 @@ import pytest from unittest import mock from cloudinit.config.cc_resolv_conf import generate_resolv_conf -from tests.unittests.test_distros.test_create_users import MyBaseDistro +from tests.unittests.util import TestingDistro EXPECTED_HEADER = """\ # Your system has been configured with 'manage-resolv-conf' set to true. 
@@ -14,7 +14,7 @@ EXPECTED_HEADER = """\ class TestGenerateResolvConf: - dist = MyBaseDistro() + dist = TestingDistro() tmpl_fn = "templates/resolv.conf.tmpl" @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py index 021866b7..685f08ba 100644 --- a/tests/unittests/test_distros/test_create_users.py +++ b/tests/unittests/test_distros/test_create_users.py @@ -5,44 +5,7 @@ import re from cloudinit import distros from cloudinit import ssh_util from cloudinit.tests.helpers import (CiTestCase, mock) - - -class MyBaseDistro(distros.Distro): - # MyBaseDistro is here to test base Distro class implementations - - def __init__(self, name="basedistro", cfg=None, paths=None): - if not cfg: - cfg = {} - if not paths: - paths = {} - super(MyBaseDistro, self).__init__(name, cfg, paths) - - def install_packages(self, pkglist): - raise NotImplementedError() - - def _write_network(self, settings): - raise NotImplementedError() - - def package_command(self, command, args=None, pkgs=None): - raise NotImplementedError() - - def update_package_sources(self): - raise NotImplementedError() - - def apply_locale(self, locale, out_fn=None): - raise NotImplementedError() - - def set_timezone(self, tz): - raise NotImplementedError() - - def _read_hostname(self, filename, default=None): - raise NotImplementedError() - - def _write_hostname(self, hostname, filename): - raise NotImplementedError() - - def _read_system_hostname(self): - raise NotImplementedError() +from tests.unittests.util import abstract_to_concrete @mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False) @@ -53,7 +16,9 @@ class TestCreateUser(CiTestCase): def setUp(self): super(TestCreateUser, self).setUp() - self.dist = MyBaseDistro() + self.dist = abstract_to_concrete(distros.Distro)( + name='test', cfg=None, paths=None + ) def _useradd2call(self, args): # return a mock call for the useradd command in args diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py index 369480be..d69916f9 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py @@ -9,19 +9,16 @@ import shutil import tempfile from unittest import mock -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers from cloudinit import templater from cloudinit import subp from cloudinit import util from cloudinit.config import cc_apt_configure -from cloudinit.sources import DataSourceNone from cloudinit.distros.debian import Distro from cloudinit.tests import helpers as t_help +from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -80,16 +77,6 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): get_arch.return_value = 'amd64' self.addCleanup(apatcher.stop) - def _get_cloud(self, distro, metadata=None): - self.patchUtils(self.new_root) - paths = helpers.Paths({}) - cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - if metadata: - myds.metadata.update(metadata) - return cloud.Cloud(myds, paths, {}, mydist, None) - def apt_source_list(self, distro, mirror, mirrorcheck=None): """apt_source_list Test rendering of a source.list from template for a given distro @@ -102,7 +89,7 @@ class 
TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): else: cfg = {'apt_mirror': mirror} - mycloud = self._get_cloud(distro) + mycloud = get_cloud(distro) with mock.patch.object(util, 'write_file') as mockwf: with mock.patch.object(util, 'load_file', @@ -175,7 +162,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): def test_apt_v1_srcl_custom(self): """Test rendering from a custom source.list template""" cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL) - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud() # the second mock restores the original subp with mock.patch.object(util, 'write_file') as mockwrite: diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py index b96fd4d4..cd6f9239 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py @@ -3,6 +3,7 @@ """ test_apt_custom_sources_list Test templating of custom sources list """ +from contextlib import ExitStack import logging import os import shutil @@ -10,19 +11,14 @@ import tempfile from unittest import mock from unittest.mock import call -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers from cloudinit import subp from cloudinit import util - from cloudinit.config import cc_apt_configure -from cloudinit.sources import DataSourceNone - from cloudinit.distros.debian import Distro - from cloudinit.tests import helpers as t_help +from tests.unittests.util import get_cloud + LOG = logging.getLogger(__name__) TARGET = "/" @@ -108,37 +104,29 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): get_arch.return_value = 'amd64' self.addCleanup(apatcher.stop) - def _get_cloud(self, distro, metadata=None): - self.patchUtils(self.new_root) - paths = helpers.Paths({}) - cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - if metadata: - myds.metadata.update(metadata) - return cloud.Cloud(myds, paths, {}, mydist, None) - def _apt_source_list(self, distro, cfg, cfg_on_empty=False): """_apt_source_list - Test rendering from template (generic)""" # entry at top level now, wrap in 'apt' key cfg = {'apt': cfg} - mycloud = self._get_cloud(distro) - - with mock.patch.object(util, 'write_file') as mock_writefile: - with mock.patch.object(util, 'load_file', - return_value=MOCKED_APT_SRC_LIST - ) as mock_loadfile: - with mock.patch.object(os.path, 'isfile', - return_value=True) as mock_isfile: - cfg_func = ('cloudinit.config.cc_apt_configure.' + - '_should_configure_on_empty_apt') - with mock.patch(cfg_func, - return_value=(cfg_on_empty, "test") - ) as mock_shouldcfg: - cc_apt_configure.handle("test", cfg, mycloud, LOG, - None) - - return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg + mycloud = get_cloud(distro) + + with ExitStack() as stack: + mock_writefile = stack.enter_context(mock.patch.object( + util, 'write_file')) + mock_loadfile = stack.enter_context(mock.patch.object( + util, 'load_file', return_value=MOCKED_APT_SRC_LIST)) + mock_isfile = stack.enter_context(mock.patch.object( + os.path, 'isfile', return_value=True)) + stack.enter_context(mock.patch.object( + util, 'del_file')) + cfg_func = ('cloudinit.config.cc_apt_configure.' 
+ '_should_configure_on_empty_apt') + mock_shouldcfg = stack.enter_context(mock.patch( + cfg_func, return_value=(cfg_on_empty, 'test') + )) + cc_apt_configure.handle("test", cfg, mycloud, LOG, None) + + return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg def test_apt_v3_source_list_debian(self): """test_apt_v3_source_list_debian - without custom sources or parms""" @@ -176,7 +164,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): """test_apt_v3_source_list_ubuntu_snappy - without custom sources or parms""" cfg = {'apt': {}} - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud() with mock.patch.object(util, 'write_file') as mock_writefile: with mock.patch.object(util, 'system_is_snappy', @@ -219,7 +207,7 @@ class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): def test_apt_v3_srcl_custom(self): """test_apt_v3_srcl_custom - Test rendering a custom source template""" cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL) - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud() # the second mock restores the original subp with mock.patch.object(util, 'write_file') as mockwrite: diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index 687cfbf1..d4db610f 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -14,18 +14,14 @@ import tempfile from unittest import TestCase, mock from unittest.mock import call -from cloudinit import cloud -from cloudinit import distros from cloudinit import gpg -from cloudinit import helpers from cloudinit import subp from cloudinit import util - from cloudinit.config import cc_apt_configure -from cloudinit.sources import DataSourceNone - from cloudinit.tests import helpers as t_help +from tests.unittests.util import get_cloud + EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 @@ -106,16 +102,6 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): else: return self.join(*args, **kwargs) - def _get_cloud(self, distro, metadata=None): - self.patchUtils(self.new_root) - paths = helpers.Paths({}) - cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - if metadata: - myds.metadata.update(metadata) - return cloud.Cloud(myds, paths, {}, mydist, None) - def _apt_src_basic(self, filename, cfg): """_apt_src_basic Test Fix deb source string, has to overwrite mirror conf in params @@ -587,7 +573,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): default_mirrors = cc_apt_configure.get_default_mirrors(arch) pmir = default_mirrors["PRIMARY"] smir = default_mirrors["SECURITY"] - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud() mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch) self.assertEqual(mirrors['MIRROR'], @@ -659,7 +645,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): default_mirrors = cc_apt_configure.get_default_mirrors(arch) pmir = default_mirrors["PRIMARY"] smir = default_mirrors["SECURITY"] - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud() cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"], "uri": "notthis"}, {'arches': ["thisarchdoesntexist"], @@ -969,7 +955,7 @@ deb http://ubuntu.com/ubuntu/ xenial-proposed main""") pmir = "phit" smir = "shit" arch = 'amd64' - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud('ubuntu') cfg = {"primary": [{'arches': ["default"], 
"search_dns": True}], "security": [{'arches': ["default"], diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py index b53d60d4..8cd3a5e1 100644 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ b/tests/unittests/test_handler/test_handler_bootcmd.py @@ -1,14 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. +import logging +import tempfile from cloudinit.config.cc_bootcmd import handle, schema -from cloudinit.sources import DataSourceNone -from cloudinit import (distros, helpers, cloud, subp, util) +from cloudinit import (subp, util) from cloudinit.tests.helpers import ( CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) -import logging -import tempfile - +from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -39,18 +38,10 @@ class TestBootcmd(CiTestCase): self.subp = subp.subp self.new_root = self.tmp_dir() - def _get_cloud(self, distro): - paths = helpers.Paths({}) - cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - paths.datasource = myds - return cloud.Cloud(myds, paths, {}, mydist, None) - def test_handler_skip_if_no_bootcmd(self): """When the provided config doesn't contain bootcmd, skip it.""" cfg = {} - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud() handle('notimportant', cfg, mycloud, LOG, None) self.assertIn( "Skipping module named notimportant, no 'bootcmd' key", @@ -59,7 +50,7 @@ class TestBootcmd(CiTestCase): def test_handler_invalid_command_set(self): """Commands which can't be converted to shell will raise errors.""" invalid_config = {'bootcmd': 1} - cc = self._get_cloud('ubuntu') + cc = get_cloud() with self.assertRaises(TypeError) as context_manager: handle('cc_bootcmd', invalid_config, cc, LOG, []) self.assertIn('Failed to shellify bootcmd', self.logs.getvalue()) @@ -75,7 +66,7 @@ class TestBootcmd(CiTestCase): invalid content. 
""" invalid_config = {'bootcmd': 1} - cc = self._get_cloud('ubuntu') + cc = get_cloud() with self.assertRaises(TypeError): handle('cc_bootcmd', invalid_config, cc, LOG, []) self.assertIn( @@ -92,7 +83,7 @@ class TestBootcmd(CiTestCase): """ invalid_config = { 'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} - cc = self._get_cloud('ubuntu') + cc = get_cloud() with self.assertRaises(TypeError) as context_manager: handle('cc_bootcmd', invalid_config, cc, LOG, []) expected_warnings = [ @@ -111,7 +102,7 @@ class TestBootcmd(CiTestCase): def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self): """Valid schema runs a bootcmd script with INSTANCE_ID in the env.""" - cc = self._get_cloud('ubuntu') + cc = get_cloud() out_file = self.tmp_path('bootcmd.out', self.new_root) my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425" valid_config = {'bootcmd': [ @@ -125,7 +116,7 @@ class TestBootcmd(CiTestCase): def test_handler_runs_bootcmd_script_with_error(self): """When a valid script generates an error, that error is raised.""" - cc = self._get_cloud('ubuntu') + cc = get_cloud() valid_config = {'bootcmd': ['exit 1']} # Script with error with mock.patch(self._etmpfile_path, FakeExtendedTempFile): diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index 6e3831ed..2a4ab49e 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -1,20 +1,19 @@ # This file is part of cloud-init. See LICENSE file for license information. +import logging +import shutil +import tempfile +import unittest +from contextlib import ExitStack +from unittest import mock -from cloudinit import cloud from cloudinit import distros from cloudinit.config import cc_ca_certs from cloudinit import helpers from cloudinit import subp from cloudinit import util - from cloudinit.tests.helpers import TestCase -import logging -import shutil -import tempfile -import unittest -from contextlib import ExitStack -from unittest import mock +from tests.unittests.util import get_cloud class TestNoConfig(unittest.TestCase): @@ -56,10 +55,6 @@ class TestConfig(TestCase): paths = helpers.Paths({}) return cls(kind, {}, paths) - def _get_cloud(self, kind): - distro = self._fetch_distro(kind) - return cloud.Cloud(None, self.paths, None, distro, None) - def _mock_init(self): self.mocks = ExitStack() self.addCleanup(self.mocks.close) @@ -81,7 +76,7 @@ class TestConfig(TestCase): for distro_name in cc_ca_certs.distros: self._mock_init() - cloud = self._get_cloud(distro_name) + cloud = get_cloud(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) self.assertEqual(self.mock_add.call_count, 0) @@ -94,7 +89,7 @@ class TestConfig(TestCase): for distro_name in cc_ca_certs.distros: self._mock_init() - cloud = self._get_cloud(distro_name) + cloud = get_cloud(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) self.assertEqual(self.mock_add.call_count, 0) @@ -107,7 +102,7 @@ class TestConfig(TestCase): for distro_name in cc_ca_certs.distros: self._mock_init() - cloud = self._get_cloud(distro_name) + cloud = get_cloud(distro_name) conf = cc_ca_certs._distro_ca_certs_configs(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) @@ -121,7 +116,7 @@ class TestConfig(TestCase): for distro_name in cc_ca_certs.distros: self._mock_init() - cloud = self._get_cloud(distro_name) + cloud = get_cloud(distro_name) conf = 
cc_ca_certs._distro_ca_certs_configs(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) @@ -135,7 +130,7 @@ class TestConfig(TestCase): for distro_name in cc_ca_certs.distros: self._mock_init() - cloud = self._get_cloud(distro_name) + cloud = get_cloud(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) self.assertEqual(self.mock_add.call_count, 0) @@ -148,7 +143,7 @@ class TestConfig(TestCase): for distro_name in cc_ca_certs.distros: self._mock_init() - cloud = self._get_cloud(distro_name) + cloud = get_cloud(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) self.assertEqual(self.mock_add.call_count, 0) @@ -161,7 +156,7 @@ class TestConfig(TestCase): for distro_name in cc_ca_certs.distros: self._mock_init() - cloud = self._get_cloud(distro_name) + cloud = get_cloud(distro_name) conf = cc_ca_certs._distro_ca_certs_configs(distro_name) cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py index 7918c609..0672cebc 100644 --- a/tests/unittests/test_handler/test_handler_chef.py +++ b/tests/unittests/test_handler/test_handler_chef.py @@ -5,16 +5,14 @@ import json import logging import os -from cloudinit import cloud from cloudinit.config import cc_chef -from cloudinit import distros -from cloudinit import helpers -from cloudinit.sources import DataSourceNone from cloudinit import util from cloudinit.tests.helpers import ( HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf) +from tests.unittests.util import get_cloud + LOG = logging.getLogger(__name__) CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"]) @@ -106,19 +104,12 @@ class TestChef(FilesystemMockingTestCase): super(TestChef, self).setUp() self.tmp = self.tmp_dir() - def fetch_cloud(self, distro_kind): - cls = distros.fetch(distro_kind) - paths = helpers.Paths({}) - distro = cls(distro_kind, {}, paths) - ds = DataSourceNone.DataSourceNone({}, distro, paths, None) - return cloud.Cloud(ds, paths, {}, distro, None) - def test_no_config(self): self.patchUtils(self.tmp) self.patchOS(self.tmp) cfg = {} - cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) for d in cc_chef.CHEF_DIRS: self.assertFalse(os.path.isdir(d)) @@ -163,7 +154,7 @@ class TestChef(FilesystemMockingTestCase): '/etc/chef/encrypted_data_bag_secret' }, } - cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) for d in cc_chef.CHEF_DIRS: self.assertTrue(os.path.isdir(d)) c = util.load_file(cc_chef.CHEF_RB_PATH) @@ -198,7 +189,7 @@ class TestChef(FilesystemMockingTestCase): } }, } - cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) c = util.load_file(cc_chef.CHEF_FB_PATH) self.assertEqual( { @@ -222,7 +213,7 @@ class TestChef(FilesystemMockingTestCase): 'show_time': None, }, } - cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) c = util.load_file(cc_chef.CHEF_RB_PATH) self.assertNotIn('json_attribs', c) self.assertNotIn('Formatter.show_time', c) @@ -246,7 +237,7 @@ class TestChef(FilesystemMockingTestCase): 'validation_cert': v_cert }, } - cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) content = 
util.load_file(cc_chef.CHEF_RB_PATH) self.assertIn(v_path, content) util.load_file(v_path) @@ -271,7 +262,7 @@ class TestChef(FilesystemMockingTestCase): } util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) util.write_file(v_path, expected_cert) - cc_chef.handle('chef', cfg, self.fetch_cloud('ubuntu'), LOG, []) + cc_chef.handle('chef', cfg, get_cloud(), LOG, []) content = util.load_file(cc_chef.CHEF_RB_PATH) self.assertIn(v_path, content) util.load_file(v_path) diff --git a/tests/unittests/test_handler/test_handler_debug.py b/tests/unittests/test_handler/test_handler_debug.py index 7d43e020..41e9d9bd 100644 --- a/tests/unittests/test_handler/test_handler_debug.py +++ b/tests/unittests/test_handler/test_handler_debug.py @@ -1,21 +1,15 @@ # Copyright (C) 2014 Yahoo! Inc. # # This file is part of cloud-init. See LICENSE file for license information. +import logging +import shutil +import tempfile -from cloudinit.config import cc_debug - -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers from cloudinit import util - -from cloudinit.sources import DataSourceNone - +from cloudinit.config import cc_debug from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock) -import logging -import shutil -import tempfile +from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -26,16 +20,7 @@ class TestDebug(FilesystemMockingTestCase): super(TestDebug, self).setUp() self.new_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.new_root) - - def _get_cloud(self, distro, metadata=None): self.patchUtils(self.new_root) - paths = helpers.Paths({}) - cls = distros.fetch(distro) - d = cls(distro, {}, paths) - ds = DataSourceNone.DataSourceNone({}, d, paths) - if metadata: - ds.metadata.update(metadata) - return cloud.Cloud(ds, paths, {}, d, None) def test_debug_write(self, m_locale): m_locale.return_value = 'en_US.UTF-8' @@ -48,7 +33,7 @@ class TestDebug(FilesystemMockingTestCase): 'output': '/var/log/cloud-init-debug.log', }, } - cc = self._get_cloud('ubuntu') + cc = get_cloud() cc_debug.handle('cc_debug', cfg, cc, LOG, []) contents = util.load_file('/var/log/cloud-init-debug.log') # Some basic sanity tests... @@ -66,7 +51,7 @@ class TestDebug(FilesystemMockingTestCase): 'output': '/var/log/cloud-init-debug.log', }, } - cc = self._get_cloud('ubuntu') + cc = get_cloud() cc_debug.handle('cc_debug', cfg, cc, LOG, []) self.assertRaises(IOError, util.load_file, '/var/log/cloud-init-debug.log') diff --git a/tests/unittests/test_handler/test_handler_landscape.py b/tests/unittests/test_handler/test_handler_landscape.py index 7d165687..00333985 100644 --- a/tests/unittests/test_handler/test_handler_landscape.py +++ b/tests/unittests/test_handler/test_handler_landscape.py @@ -1,14 +1,13 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+import logging +from configobj import ConfigObj from cloudinit.config import cc_landscape -from cloudinit import (distros, helpers, cloud, util) -from cloudinit.sources import DataSourceNone +from cloudinit import util from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock, wrap_and_call) -from configobj import ConfigObj -import logging - +from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -22,18 +21,11 @@ class TestLandscape(FilesystemMockingTestCase): self.new_root = self.tmp_dir() self.conf = self.tmp_path('client.conf', self.new_root) self.default_file = self.tmp_path('default_landscape', self.new_root) - - def _get_cloud(self, distro): self.patchUtils(self.new_root) - paths = helpers.Paths({'templates_dir': self.new_root}) - cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - return cloud.Cloud(myds, paths, {}, mydist, None) def test_handler_skips_empty_landscape_cloudconfig(self): """Empty landscape cloud-config section does no work.""" - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud('ubuntu') mycloud.distro = mock.MagicMock() cfg = {'landscape': {}} cc_landscape.handle('notimportant', cfg, mycloud, LOG, None) @@ -41,7 +33,7 @@ class TestLandscape(FilesystemMockingTestCase): def test_handler_error_on_invalid_landscape_type(self): """Raise an error when landscape configuraiton option is invalid.""" - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud('ubuntu') cfg = {'landscape': 'wrongtype'} with self.assertRaises(RuntimeError) as context_manager: cc_landscape.handle('notimportant', cfg, mycloud, LOG, None) @@ -52,7 +44,7 @@ class TestLandscape(FilesystemMockingTestCase): @mock.patch('cloudinit.config.cc_landscape.subp') def test_handler_restarts_landscape_client(self, m_subp): """handler restarts lansdscape-client after install.""" - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud('ubuntu') cfg = {'landscape': {'client': {}}} wrap_and_call( 'cloudinit.config.cc_landscape', @@ -64,7 +56,7 @@ class TestLandscape(FilesystemMockingTestCase): def test_handler_installs_client_and_creates_config_file(self): """Write landscape client.conf and install landscape-client.""" - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud('ubuntu') cfg = {'landscape': {'client': {}}} expected = {'client': { 'log_level': 'info', @@ -91,7 +83,7 @@ class TestLandscape(FilesystemMockingTestCase): """Merge and write options from LSC_CLIENT_CFG_FILE with defaults.""" # Write existing sparse client.conf file util.write_file(self.conf, '[client]\ncomputer_title = My PC\n') - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud('ubuntu') cfg = {'landscape': {'client': {}}} expected = {'client': { 'log_level': 'info', @@ -112,7 +104,7 @@ class TestLandscape(FilesystemMockingTestCase): """Merge and write options from cloud-config options with defaults.""" # Write empty sparse client.conf file util.write_file(self.conf, '') - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud('ubuntu') cfg = {'landscape': {'client': {'computer_title': 'My PC'}}} expected = {'client': { 'log_level': 'info', diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py index 15fe7b23..3c17927e 100644 --- a/tests/unittests/test_handler/test_handler_locale.py +++ b/tests/unittests/test_handler/test_handler_locale.py @@ -3,27 +3,21 @@ # Author: Juerg Haefliger # # This file is part of cloud-init. 
See LICENSE file for license information. - -from cloudinit.config import cc_locale - -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers -from cloudinit import util - -from cloudinit.sources import DataSourceNoCloud - -from cloudinit.tests import helpers as t_help - -from configobj import ConfigObj - import logging import os import shutil import tempfile from io import BytesIO +from configobj import ConfigObj from unittest import mock +from cloudinit import util +from cloudinit.config import cc_locale +from cloudinit.tests import helpers as t_help + +from tests.unittests.util import get_cloud + + LOG = logging.getLogger(__name__) @@ -33,16 +27,7 @@ class TestLocale(t_help.FilesystemMockingTestCase): super(TestLocale, self).setUp() self.new_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.new_root) - - def _get_cloud(self, distro): self.patchUtils(self.new_root) - paths = helpers.Paths({}) - - cls = distros.fetch(distro) - d = cls(distro, {}, paths) - ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths) - cc = cloud.Cloud(ds, paths, {}, d, None) - return cc def test_set_locale_arch(self): locale = 'en_GB.UTF-8' @@ -51,7 +36,7 @@ class TestLocale(t_help.FilesystemMockingTestCase): 'locale': locale, 'locale_configfile': locale_configfile, } - cc = self._get_cloud('arch') + cc = get_cloud('arch') with mock.patch('cloudinit.distros.arch.subp.subp') as m_subp: with mock.patch('cloudinit.distros.arch.LOG.warning') as m_LOG: @@ -72,7 +57,7 @@ class TestLocale(t_help.FilesystemMockingTestCase): cfg = { 'locale': 'My.Locale', } - cc = self._get_cloud('sles') + cc = get_cloud('sles') cc_locale.handle('cc_locale', cfg, cc, LOG, []) if cc.distro.uses_systemd(): locale_conf = cc.distro.systemd_locale_conf_fn @@ -87,7 +72,7 @@ class TestLocale(t_help.FilesystemMockingTestCase): def test_set_locale_sles_default(self): cfg = {} - cc = self._get_cloud('sles') + cc = get_cloud('sles') cc_locale.handle('cc_locale', cfg, cc, LOG, []) if cc.distro.uses_systemd(): @@ -106,7 +91,7 @@ class TestLocale(t_help.FilesystemMockingTestCase): locale_conf = os.path.join(self.new_root, "etc/default/locale") util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n') cfg = {'locale': 'C.UTF-8'} - cc = self._get_cloud('ubuntu') + cc = get_cloud('ubuntu') with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp: with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN', locale_conf): @@ -118,7 +103,7 @@ class TestLocale(t_help.FilesystemMockingTestCase): def test_locale_rhel_defaults_en_us_utf8(self): """Test cc_locale gets en_US.UTF-8 from distro get_locale fallback""" cfg = {} - cc = self._get_cloud('rhel') + cc = get_cloud('rhel') update_sysconfig = 'cloudinit.distros.rhel_util.update_sysconfig_file' with mock.patch.object(cc.distro, 'uses_systemd') as m_use_sd: m_use_sd.return_value = True diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index b2181992..ea8b6e90 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -1,11 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+from unittest import mock from cloudinit.config import cc_lxd -from cloudinit.sources import DataSourceNoCloud -from cloudinit import (distros, helpers, cloud) from cloudinit.tests import helpers as t_help -from unittest import mock +from tests.unittests.util import get_cloud class TestLxd(t_help.CiTestCase): @@ -22,18 +21,10 @@ class TestLxd(t_help.CiTestCase): } } - def _get_cloud(self, distro): - cls = distros.fetch(distro) - paths = helpers.Paths({}) - d = cls(distro, {}, paths) - ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths) - cc = cloud.Cloud(ds, paths, {}, d, None) - return cc - @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.subp") def test_lxd_init(self, mock_subp, m_maybe_clean): - cc = self._get_cloud('ubuntu') + cc = get_cloud() mock_subp.which.return_value = True m_maybe_clean.return_value = None cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) @@ -50,7 +41,7 @@ class TestLxd(t_help.CiTestCase): @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.subp") def test_lxd_install(self, mock_subp, m_maybe_clean): - cc = self._get_cloud('ubuntu') + cc = get_cloud() cc.distro = mock.MagicMock() mock_subp.which.return_value = None cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) @@ -64,7 +55,7 @@ class TestLxd(t_help.CiTestCase): @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.subp") def test_no_init_does_nothing(self, mock_subp, m_maybe_clean): - cc = self._get_cloud('ubuntu') + cc = get_cloud() cc.distro = mock.MagicMock() cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) @@ -74,7 +65,7 @@ class TestLxd(t_help.CiTestCase): @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") @mock.patch("cloudinit.config.cc_lxd.subp") def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean): - cc = self._get_cloud('ubuntu') + cc = get_cloud() cc.distro = mock.MagicMock() cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, []) self.assertFalse(cc.distro.install_packages.called) diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py index 6891e15f..9cda6fbe 100644 --- a/tests/unittests/test_handler/test_handler_mcollective.py +++ b/tests/unittests/test_handler/test_handler_mcollective.py @@ -1,11 +1,4 @@ # This file is part of cloud-init. See LICENSE file for license information. 
- -from cloudinit import (cloud, distros, helpers, util) -from cloudinit.config import cc_mcollective -from cloudinit.sources import DataSourceNoCloud - -from cloudinit.tests import helpers as t_help - import configobj import logging import os @@ -13,6 +6,12 @@ import shutil import tempfile from io import BytesIO +from cloudinit import (util) +from cloudinit.config import cc_mcollective +from cloudinit.tests import helpers as t_help + +from tests.unittests.util import get_cloud + LOG = logging.getLogger(__name__) @@ -128,18 +127,10 @@ class TestConfig(t_help.FilesystemMockingTestCase): class TestHandler(t_help.TestCase): - def _get_cloud(self, distro): - cls = distros.fetch(distro) - paths = helpers.Paths({}) - d = cls(distro, {}, paths) - ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths) - cc = cloud.Cloud(ds, paths, {}, d, None) - return cc - @t_help.mock.patch("cloudinit.config.cc_mcollective.subp") @t_help.mock.patch("cloudinit.config.cc_mcollective.util") def test_mcollective_install(self, mock_util, mock_subp): - cc = self._get_cloud('ubuntu') + cc = get_cloud() cc.distro = t_help.mock.MagicMock() mock_util.load_file.return_value = b"" mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}} diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 6b9c8377..5dfa564f 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -1,17 +1,17 @@ # This file is part of cloud-init. See LICENSE file for license information. +import copy +import os +import shutil +from functools import partial +from os.path import dirname +from cloudinit import (helpers, util) from cloudinit.config import cc_ntp -from cloudinit.sources import DataSourceNone -from cloudinit import (distros, helpers, cloud, util) - from cloudinit.tests.helpers import ( CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) +from tests.unittests.util import get_cloud -import copy -import os -from os.path import dirname -import shutil NTP_TEMPLATE = """\ ## template: jinja @@ -39,16 +39,11 @@ class TestNtp(FilesystemMockingTestCase): self.m_snappy.return_value = False self.add_patch('cloudinit.util.system_info', 'm_sysinfo') self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} - - def _get_cloud(self, distro, sys_cfg=None): - self.new_root = self.reRoot(root=self.new_root) - paths = helpers.Paths({'templates_dir': self.new_root}) - cls = distros.fetch(distro) - if not sys_cfg: - sys_cfg = {} - mydist = cls(distro, sys_cfg, paths) - myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, paths) - return cloud.Cloud(myds, paths, sys_cfg, mydist, None) + self.new_root = self.reRoot() + self._get_cloud = partial( + get_cloud, + paths=helpers.Paths({'templates_dir': self.new_root}) + ) def _get_template_path(self, template_name, distro, basepath=None): # ntp.conf.{distro} -> ntp.conf.debian.tmpl diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py index 19f72a0c..8d99f535 100644 --- a/tests/unittests/test_handler/test_handler_puppet.py +++ b/tests/unittests/test_handler/test_handler_puppet.py @@ -1,13 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. 
+import logging +import textwrap from cloudinit.config import cc_puppet -from cloudinit.sources import DataSourceNone -from cloudinit import (distros, helpers, cloud, util) +from cloudinit import util from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase, mock -import logging -import textwrap - +from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -65,19 +64,13 @@ class TestPuppetHandle(CiTestCase): self.conf = self.tmp_path('puppet.conf') self.csr_attributes_path = self.tmp_path( 'csr_attributes.yaml') - - def _get_cloud(self, distro): - paths = helpers.Paths({'templates_dir': self.new_root}) - cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - return cloud.Cloud(myds, paths, {}, mydist, None) + self.cloud = get_cloud() def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto): """Cloud-config containing no 'puppet' key is skipped.""" - mycloud = self._get_cloud('ubuntu') + cfg = {} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertIn( "no 'puppet' configuration found", self.logs.getvalue()) self.assertEqual(0, m_auto.call_count) @@ -85,9 +78,9 @@ class TestPuppetHandle(CiTestCase): @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) def test_puppet_config_starts_puppet_service(self, m_subp, m_auto): """Cloud-config 'puppet' configuration starts puppet.""" - mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'install': False}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( [mock.call(['service', 'puppet', 'start'], capture=False)], @@ -96,34 +89,34 @@ class TestPuppetHandle(CiTestCase): @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto): """Cloud-config empty 'puppet' configuration installs latest puppet.""" - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() + + self.cloud.distro = mock.MagicMock() cfg = {'puppet': {}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertEqual( [mock.call(('puppet', None))], - mycloud.distro.install_packages.call_args_list) + self.cloud.distro.install_packages.call_args_list) @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) def test_puppet_config_installs_puppet_on_true(self, m_subp, _): """Cloud-config with 'puppet' key installs when 'install' is True.""" - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() + + self.cloud.distro = mock.MagicMock() cfg = {'puppet': {'install': True}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertEqual( [mock.call(('puppet', None))], - mycloud.distro.install_packages.call_args_list) + self.cloud.distro.install_packages.call_args_list) @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio'.""" - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() + + self.cloud.distro = mock.MagicMock() cfg = 
{'puppet': {'install': True, 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) m_aio.assert_called_with( cc_puppet.AIO_INSTALL_URL, None, None, True) @@ -134,11 +127,11 @@ class TestPuppetHandle(CiTestCase): m_subp, m_aio, _): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio' and 'version' is specified.""" - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() + + self.cloud.distro = mock.MagicMock() cfg = {'puppet': {'install': True, 'version': '6.24.0', 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) m_aio.assert_called_with( cc_puppet.AIO_INSTALL_URL, '6.24.0', None, True) @@ -150,11 +143,11 @@ class TestPuppetHandle(CiTestCase): m_aio, _): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio' and 'collection' is specified.""" - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() + + self.cloud.distro = mock.MagicMock() cfg = {'puppet': {'install': True, 'collection': 'puppet6', 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) m_aio.assert_called_with( cc_puppet.AIO_INSTALL_URL, None, 'puppet6', True) @@ -166,13 +159,13 @@ class TestPuppetHandle(CiTestCase): m_aio, _): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio' and 'aio_install_url' is specified.""" - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() + + self.cloud.distro = mock.MagicMock() cfg = {'puppet': {'install': True, 'aio_install_url': 'http://test.url/path/to/script.sh', 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) m_aio.assert_called_with( 'http://test.url/path/to/script.sh', None, None, True) @@ -183,11 +176,11 @@ class TestPuppetHandle(CiTestCase): m_aio, _): """Cloud-config with 'puppet' key installs when 'install_type' is 'aio' and no cleanup.""" - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() + + self.cloud.distro = mock.MagicMock() cfg = {'puppet': {'install': True, 'cleanup': False, 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) m_aio.assert_called_with( cc_puppet.AIO_INSTALL_URL, None, None, False) @@ -195,13 +188,13 @@ class TestPuppetHandle(CiTestCase): @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) def test_puppet_config_installs_puppet_version(self, m_subp, _): """Cloud-config 'puppet' configuration can specify a version.""" - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() + + self.cloud.distro = mock.MagicMock() cfg = {'puppet': {'version': '3.8'}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertEqual( [mock.call(('puppet', '3.8'))], - mycloud.distro.install_packages.call_args_list) + self.cloud.distro.install_packages.call_args_list) @mock.patch('cloudinit.config.cc_puppet.get_config_value') @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) @@ -213,14 +206,14 @@ class TestPuppetHandle(CiTestCase): return self.conf m_default.side_effect = _fake_get_config_value - mycloud = self._get_cloud('ubuntu') + cfg = { 
'puppet': { 'conf': {'agent': {'server': 'puppetserver.example.org'}}}} util.write_file( self.conf, '[agent]\nserver = origpuppet\nother = 3') - mycloud.distro = mock.MagicMock() - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + self.cloud.distro = mock.MagicMock() + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) content = util.load_file(self.conf) expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n' self.assertEqual(expected, content) @@ -236,8 +229,8 @@ class TestPuppetHandle(CiTestCase): return self.csr_attributes_path m_default.side_effect = _fake_get_config_value - mycloud = self._get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() + + self.cloud.distro = mock.MagicMock() cfg = { 'puppet': { 'csr_attributes': { @@ -254,7 +247,7 @@ class TestPuppetHandle(CiTestCase): } } } - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) content = util.load_file(self.csr_attributes_path) expected = textwrap.dedent("""\ custom_attributes: @@ -269,9 +262,9 @@ class TestPuppetHandle(CiTestCase): @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto): """Run puppet with default args if 'exec' is set to True.""" - mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'exec': True}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( [mock.call(['puppet', 'agent', '--test'], capture=False)], @@ -280,9 +273,9 @@ class TestPuppetHandle(CiTestCase): @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) def test_puppet_starts_puppetd(self, m_subp, m_auto): """Run puppet with default args if 'exec' is set to True.""" - mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( [mock.call(['service', 'puppet', 'start'], capture=False)], @@ -291,9 +284,9 @@ class TestPuppetHandle(CiTestCase): @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) def test_puppet_skips_puppetd(self, m_subp, m_auto): """Run puppet with default args if 'exec' is set to True.""" - mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'start_service': False}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertEqual(0, m_auto.call_count) self.assertNotIn( [mock.call(['service', 'puppet', 'start'], capture=False)], @@ -303,10 +296,10 @@ class TestPuppetHandle(CiTestCase): def test_puppet_runs_puppet_with_args_list_if_requested(self, m_subp, m_auto): """Run puppet with 'exec_args' list if 'exec' is set to True.""" - mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'exec': True, 'exec_args': [ '--onetime', '--detailed-exitcodes']}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( [mock.call( @@ -318,10 +311,10 @@ class TestPuppetHandle(CiTestCase): def test_puppet_runs_puppet_with_args_string_if_requested(self, m_subp, m_auto): """Run puppet with 'exec_args' string if 'exec' is set to True.""" - mycloud = self._get_cloud('ubuntu') + cfg = {'puppet': {'exec': True, 'exec_args': 
'--onetime --detailed-exitcodes'}} - cc_puppet.handle('notimportant', cfg, mycloud, LOG, None) + cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) self.assertEqual(1, m_auto.call_count) self.assertIn( [mock.call( diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py index c03efa67..672e8093 100644 --- a/tests/unittests/test_handler/test_handler_runcmd.py +++ b/tests/unittests/test_handler/test_handler_runcmd.py @@ -1,16 +1,16 @@ # This file is part of cloud-init. See LICENSE file for license information. +import logging +import os +import stat +from unittest.mock import patch from cloudinit.config.cc_runcmd import handle, schema -from cloudinit.sources import DataSourceNone -from cloudinit import (distros, helpers, cloud, subp, util) +from cloudinit import (helpers, subp, util) from cloudinit.tests.helpers import ( CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin, skipUnlessJsonSchema) -from unittest.mock import patch -import logging -import os -import stat +from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -23,20 +23,13 @@ class TestRuncmd(FilesystemMockingTestCase): super(TestRuncmd, self).setUp() self.subp = subp.subp self.new_root = self.tmp_dir() - - def _get_cloud(self, distro): self.patchUtils(self.new_root) - paths = helpers.Paths({'scripts': self.new_root}) - cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - paths.datasource = myds - return cloud.Cloud(myds, paths, {}, mydist, None) + self.paths = helpers.Paths({'scripts': self.new_root}) def test_handler_skip_if_no_runcmd(self): """When the provided config doesn't contain runcmd, skip it.""" cfg = {} - mycloud = self._get_cloud('ubuntu') + mycloud = get_cloud(paths=self.paths) handle('notimportant', cfg, mycloud, LOG, None) self.assertIn( "Skipping module named notimportant, no 'runcmd' key", @@ -47,7 +40,7 @@ class TestRuncmd(FilesystemMockingTestCase): """When shellify fails throw exception""" cls.side_effect = TypeError("patched shellify") valid_config = {'runcmd': ['echo 42']} - cc = self._get_cloud('ubuntu') + cc = get_cloud(paths=self.paths) with self.assertRaises(TypeError) as cm: with self.allow_subp(['/bin/sh']): handle('cc_runcmd', valid_config, cc, LOG, None) @@ -56,7 +49,7 @@ class TestRuncmd(FilesystemMockingTestCase): def test_handler_invalid_command_set(self): """Commands which can't be converted to shell will raise errors.""" invalid_config = {'runcmd': 1} - cc = self._get_cloud('ubuntu') + cc = get_cloud(paths=self.paths) with self.assertRaises(TypeError) as cm: handle('cc_runcmd', invalid_config, cc, LOG, []) self.assertIn( @@ -72,7 +65,7 @@ class TestRuncmd(FilesystemMockingTestCase): invalid content. 
""" invalid_config = {'runcmd': 1} - cc = self._get_cloud('ubuntu') + cc = get_cloud(paths=self.paths) with self.assertRaises(TypeError) as cm: handle('cc_runcmd', invalid_config, cc, LOG, []) self.assertIn( @@ -89,7 +82,7 @@ class TestRuncmd(FilesystemMockingTestCase): """ invalid_config = { 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} - cc = self._get_cloud('ubuntu') + cc = get_cloud(paths=self.paths) with self.assertRaises(TypeError) as cm: handle('cc_runcmd', invalid_config, cc, LOG, []) expected_warnings = [ @@ -105,7 +98,7 @@ class TestRuncmd(FilesystemMockingTestCase): def test_handler_write_valid_runcmd_schema_to_file(self): """Valid runcmd schema is written to a runcmd shell script.""" valid_config = {'runcmd': [['ls', '/']]} - cc = self._get_cloud('ubuntu') + cc = get_cloud(paths=self.paths) handle('cc_runcmd', valid_config, cc, LOG, []) runcmd_file = os.path.join( self.new_root, diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py index 85167f19..2ab153d2 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/test_handler/test_handler_seed_random.py @@ -7,24 +7,17 @@ # Based on test_handler_set_hostname.py # # This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.config import cc_seed_random - import gzip +import logging import tempfile from io import BytesIO -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers from cloudinit import subp from cloudinit import util - -from cloudinit.sources import DataSourceNone - +from cloudinit.config import cc_seed_random from cloudinit.tests import helpers as t_help -import logging +from tests.unittests.util import get_cloud LOG = logging.getLogger(__name__) @@ -66,15 +59,6 @@ class TestRandomSeed(t_help.TestCase): gz_fh.close() return contents.getvalue() - def _get_cloud(self, distro, metadata=None): - paths = helpers.Paths({}) - cls = distros.fetch(distro) - ubuntu_distro = cls(distro, {}, paths) - ds = DataSourceNone.DataSourceNone({}, ubuntu_distro, paths) - if metadata: - ds.metadata = metadata - return cloud.Cloud(ds, paths, {}, ubuntu_distro, None) - def test_append_random(self): cfg = { 'random_seed': { @@ -82,7 +66,7 @@ class TestRandomSeed(t_help.TestCase): 'data': 'tiny-tim-was-here', } } - cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("tiny-tim-was-here", contents) @@ -96,7 +80,7 @@ class TestRandomSeed(t_help.TestCase): } } self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg, - self._get_cloud('ubuntu'), LOG, []) + get_cloud('ubuntu'), LOG, []) def test_append_random_gzip(self): data = self._compress(b"tiny-toe") @@ -107,7 +91,7 @@ class TestRandomSeed(t_help.TestCase): 'encoding': 'gzip', } } - cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("tiny-toe", contents) @@ -120,7 +104,7 @@ class TestRandomSeed(t_help.TestCase): 'encoding': 'gz', } } - cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("big-toe", contents) @@ -133,7 +117,7 @@ class 
TestRandomSeed(t_help.TestCase): 'encoding': 'base64', } } - cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("bubbles", contents) @@ -146,7 +130,7 @@ class TestRandomSeed(t_help.TestCase): 'encoding': 'b64', } } - cc_seed_random.handle('test', cfg, self._get_cloud('ubuntu'), LOG, []) + cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) contents = util.load_file(self._seed_file) self.assertEqual("kit-kat", contents) @@ -157,13 +141,13 @@ class TestRandomSeed(t_help.TestCase): 'data': 'tiny-tim-was-here', } } - c = self._get_cloud('ubuntu', {'random_seed': '-so-was-josh'}) + c = get_cloud('ubuntu', metadata={'random_seed': '-so-was-josh'}) cc_seed_random.handle('test', cfg, c, LOG, []) contents = util.load_file(self._seed_file) self.assertEqual('tiny-tim-was-here-so-was-josh', contents) def test_seed_command_provided_and_available(self): - c = self._get_cloud('ubuntu', {}) + c = get_cloud('ubuntu') self.whichdata = {'pollinate': '/usr/bin/pollinate'} cfg = {'random_seed': {'command': ['pollinate', '-q']}} cc_seed_random.handle('test', cfg, c, LOG, []) @@ -172,7 +156,7 @@ class TestRandomSeed(t_help.TestCase): self.assertIn(['pollinate', '-q'], subp_args) def test_seed_command_not_provided(self): - c = self._get_cloud('ubuntu', {}) + c = get_cloud('ubuntu') self.whichdata = {} cc_seed_random.handle('test', {}, c, LOG, []) @@ -180,7 +164,7 @@ class TestRandomSeed(t_help.TestCase): self.assertFalse(self.subp_called) def test_unavailable_seed_command_and_required_raises_error(self): - c = self._get_cloud('ubuntu', {}) + c = get_cloud('ubuntu') self.whichdata = {} cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'], 'command_required': True}} @@ -188,7 +172,7 @@ class TestRandomSeed(t_help.TestCase): 'test', cfg, c, LOG, []) def test_seed_command_and_required(self): - c = self._get_cloud('ubuntu', {}) + c = get_cloud('ubuntu') self.whichdata = {'foo': 'foo'} cfg = {'random_seed': {'command_required': True, 'command': ['foo']}} cc_seed_random.handle('test', cfg, c, LOG, []) @@ -196,7 +180,7 @@ class TestRandomSeed(t_help.TestCase): self.assertIn(['foo'], [f['args'] for f in self.subp_called]) def test_file_in_environment_for_command(self): - c = self._get_cloud('ubuntu', {}) + c = get_cloud('ubuntu') self.whichdata = {'foo': 'foo'} cfg = {'random_seed': {'command_required': True, 'command': ['foo'], 'file': self._seed_file}} diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py index 50c45363..77cdb0c2 100644 --- a/tests/unittests/test_handler/test_handler_timezone.py +++ b/tests/unittests/test_handler/test_handler_timezone.py @@ -6,21 +6,19 @@ from cloudinit.config import cc_timezone -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers from cloudinit import util -from cloudinit.sources import DataSourceNoCloud -from cloudinit.tests import helpers as t_help - -from configobj import ConfigObj import logging import shutil import tempfile +from configobj import ConfigObj from io import BytesIO +from cloudinit.tests import helpers as t_help + +from tests.unittests.util import get_cloud + LOG = logging.getLogger(__name__) @@ -29,25 +27,15 @@ class TestTimezone(t_help.FilesystemMockingTestCase): super(TestTimezone, self).setUp() self.new_root = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, self.new_root) - - def _get_cloud(self, 
distro): self.patchUtils(self.new_root) self.patchOS(self.new_root) - paths = helpers.Paths({}) - - cls = distros.fetch(distro) - d = cls(distro, {}, paths) - ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths) - cc = cloud.Cloud(ds, paths, {}, d, None) - return cc - def test_set_timezone_sles(self): cfg = { 'timezone': 'Tatooine/Bestine', } - cc = self._get_cloud('sles') + cc = get_cloud('sles') # Create a dummy timezone file dummy_contents = '0123456789abcdefgh' diff --git a/tests/unittests/util.py b/tests/unittests/util.py new file mode 100644 index 00000000..383f5f5c --- /dev/null +++ b/tests/unittests/util.py @@ -0,0 +1,143 @@ +# This file is part of cloud-init. See LICENSE file for license information. +from cloudinit import cloud, distros, helpers +from cloudinit.sources.DataSourceNone import DataSourceNone + + +def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None): + """Obtain a "cloud" that can be used for testing. + + Modules take a 'cloud' parameter to call into things that are + datasource/distro specific. In most cases, the specifics of this cloud + implementation aren't needed to test the module, so provide a fake + datasource/distro with stubbed calls to methods that may attempt to + read/write files or shell out. If a specific distro is needed, it can + be passed in as the distro parameter. + """ + paths = paths or helpers.Paths({}) + sys_cfg = sys_cfg or {} + cls = distros.fetch(distro) if distro else TestingDistro + mydist = cls(distro, sys_cfg, paths) + myds = DataSourceTesting(sys_cfg, mydist, paths) + if metadata: + myds.metadata.update(metadata) + if paths: + paths.datasource = myds + return cloud.Cloud(myds, paths, sys_cfg, mydist, None) + + +def abstract_to_concrete(abclass): + """Takes an abstract class and returns a concrete version of it.""" + class concreteCls(abclass): + pass + concreteCls.__abstractmethods__ = frozenset() + return type('DummyConcrete' + abclass.__name__, (concreteCls,), {}) + + +class DataSourceTesting(DataSourceNone): + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): + return 'hostname' + + def persist_instance_data(self): + return True + + @property + def fallback_interface(self): + return None + + @property + def cloud_name(self): + return 'testing' + + +class TestingDistro(distros.Distro): + # TestingDistro is here to test base Distro class implementations + def __init__(self, name="testingdistro", cfg=None, paths=None): + if not cfg: + cfg = {} + if not paths: + paths = {} + super(TestingDistro, self).__init__(name, cfg, paths) + + def install_packages(self, pkglist): + pass + + def set_hostname(self, hostname, fqdn=None): + pass + + def uses_systemd(self): + return True + + def get_primary_arch(self): + return 'i386' + + def get_package_mirror_info(self, arch=None, data_source=None): + pass + + def apply_network(self, settings, bring_up=True): + return False + + def generate_fallback_config(self): + return {} + + def apply_network_config(self, netconfig, bring_up=False) -> bool: + return False + + def apply_network_config_names(self, netconfig): + pass + + def apply_locale(self, locale, out_fn=None): + pass + + def set_timezone(self, tz): + pass + + def _read_hostname(self, filename, default=None): + raise NotImplementedError() + + def _write_hostname(self, hostname, filename): + raise NotImplementedError() + + def _read_system_hostname(self): + raise NotImplementedError() + + def update_hostname(self, hostname, fqdn, prev_hostname_fn): + pass + + def update_etc_hosts(self, hostname, fqdn): + 
pass + + def add_user(self, name, **kwargs): + pass + + def add_snap_user(self, name, **kwargs): + return 'snap_user' + + def create_user(self, name, **kwargs): + return True + + def lock_passwd(self, name): + pass + + def expire_passwd(self, user): + pass + + def set_passwd(self, user, passwd, hashed=False): + return True + + def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'): + pass + + def write_sudo_rules(self, user, rules, sudo_file=None): + pass + + def create_group(self, name, members=None): + pass + + def shutdown_command(self, *, mode, delay, message): + pass + + def package_command(self, command, args=None, pkgs=None): + pass + + def update_package_sources(self): + return (True, "yay") -- cgit v1.2.3 From 62c2a56e053dd70638ef3421ffbfbc0b81356691 Mon Sep 17 00:00:00 2001 From: Olivier Lemasle Date: Mon, 18 Oct 2021 18:26:48 +0200 Subject: CloudStack: fix data-server DNS resolution (#1004) CloudStack data-server DNS resolution should be done against the DNS search domain; with the final dot appended, DNS resolution does not work on e.g. Fedora 34. LP: #1942232 --- cloudinit/sources/DataSourceCloudStack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 54810439..8cb0d5a7 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -160,7 +160,7 @@ class DataSourceCloudStack(sources.DataSource): def get_data_server(): # Returns the metadataserver from dns try: - addrinfo = getaddrinfo("data-server.", 80) + addrinfo = getaddrinfo("data-server", 80) except gaierror: LOG.debug("DNS Entry data-server not found") return None -- cgit v1.2.3 From a0a68a24c34ee268962e7a3c3844c59ab4036bf9 Mon Sep 17 00:00:00 2001 From: Thomas Weißschuh Date: Tue, 19 Oct 2021 02:09:40 +0200 Subject: VMware: read network-config from ISO (#1066) There is no reason for the ISO transport to be missing this functionality. As discussed in https://github.com/canonical/cloud-init/pull/947/files#r707338489 --- cloudinit/sources/DataSourceOVF.py | 3 +-- doc/sources/ovf/example/ovf-env.xml | 2 +- tests/unittests/test_datasource/test_ovf.py | 10 ++++++++-- tools/.github-cla-signers | 1 + 4 files changed, 11 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 08a205f1..5257a534 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -360,8 +360,7 @@ class DataSourceOVF(sources.DataSource): if contents: break if contents: - read_network = ('com.vmware.guestInfo' == name) - (md, ud, cfg) = read_ovf_environment(contents, read_network) + (md, ud, cfg) = read_ovf_environment(contents, True) self.environment = contents if 'network-config' in md and md['network-config']: self._network_config = md['network-config'] diff --git a/doc/sources/ovf/example/ovf-env.xml b/doc/sources/ovf/example/ovf-env.xml index 4ef4ee63..e5f4e262 100644 --- a/doc/sources/ovf/example/ovf-env.xml +++ b/doc/sources/ovf/example/ovf-env.xml @@ -42,7 +42,7 @@ +OVF_ENV_CONTENT = """<?xml version="1.0" encoding="UTF-8"?> +<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1" + xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"> + <oe:PlatformSection> + <Kind>ESX Server</Kind> + <Version>3.0.1</Version> + <Vendor>VMware, Inc.</Vendor>
+ <Locale>en_US</Locale> + </oe:PlatformSection> + <PropertySection> +{properties} + </PropertySection> +</Environment> +""" + + +def fill_properties(props, template=OVF_ENV_CONTENT): + lines = [] + prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>' + for key, val in props.items(): + lines.append(prop_tmpl.format(key=key, val=val)) + indent = " " + properties = ''.join([indent + line + "\n" for line in lines]) + return template.format(properties=properties) + + +class TestReadOvfEnv(CiTestCase): + def test_with_b64_userdata(self): + user_data = "#!/bin/sh\necho hello world\n" + user_data_b64 = base64.b64encode(user_data.encode()).decode() + props = {"user-data": user_data_b64, "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual(user_data.encode(), ud) + self.assertEqual({'password': "passw0rd"}, cfg) + + def test_with_non_b64_userdata(self): + user_data = "my-user-data" + props = {"user-data": user_data, "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual(user_data.encode(), ud) + self.assertEqual({}, cfg) + + def test_with_no_userdata(self): + props = {"password": "passw0rd", "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertIsNone(ud) + + def test_with_b64_network_config_enable_read_network(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - eng.vmware.com + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + network_config_b64 = base64.b64encode(network_config.encode()).decode() + props = {"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env, True) + self.assertEqual("inst-001", md["instance-id"]) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertEqual( + {'version': 2, 'ethernets': + {'nics': + {'nameservers': + {'addresses': ['127.0.0.53'], + 'search': ['eng.vmware.com', 'vmware.com']}, + 'match': {'name': 'eth*'}, + 'gateway4': '10.10.10.253', + 'dhcp4': False, + 'addresses': ['10.10.10.1/24']}}}, + md["network-config"]) + self.assertIsNone(ud) + + def test_with_non_b64_network_config_enable_read_network(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - eng.vmware.com + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + props = {"network-config": network_config, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env, True) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertIsNone(ud) + + def test_with_b64_network_config_disable_read_network(self): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - eng.vmware.com + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + network_config_b64 = base64.b64encode(network_config.encode()).decode() + props =
{"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + md, ud, cfg = dsovf.read_ovf_environment(env) + self.assertEqual({"instance-id": "inst-001"}, md) + self.assertEqual({'password': "passw0rd"}, cfg) + self.assertIsNone(ud) + + +class TestMarkerFiles(CiTestCase): + + def setUp(self): + super(TestMarkerFiles, self).setUp() + self.tdir = self.tmp_dir() + + def test_false_when_markerid_none(self): + """Return False when markerid provided is None.""" + self.assertFalse( + dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)) + + def test_markerid_file_exist(self): + """Return False when markerid file path does not exist, + True otherwise.""" + self.assertFalse( + dsovf.check_marker_exists('123', self.tdir)) + + marker_file = self.tmp_path('.markerfile-123.txt', self.tdir) + util.write_file(marker_file, '') + self.assertTrue( + dsovf.check_marker_exists('123', self.tdir) + ) + + def test_marker_file_setup(self): + """Test creation of marker files.""" + markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir) + self.assertFalse(os.path.exists(markerfilepath)) + dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir) + self.assertTrue(os.path.exists(markerfilepath)) + + +class TestDatasourceOVF(CiTestCase): + + with_logs = True + + def setUp(self): + super(TestDatasourceOVF, self).setUp() + self.datasource = dsovf.DataSourceOVF + self.tdir = self.tmp_dir() + + def test_get_data_false_on_none_dmi_data(self): + """When dmi for system-product-name is None, get_data returns False.""" + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': None, + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: No system-product-name found', self.logs.getvalue()) + + def test_get_data_vmware_customization_disabled(self): + """When vmware customization is disabled via sys_cfg and + allow_raw_data is disabled via ds_cfg, log a message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True, + 'datasource': {'OVF': {'allow_raw_data': False}}}, + distro={}, paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization for VMware platform is disabled.', + self.logs.getvalue()) + + def test_get_data_vmware_customization_sys_cfg_disabled(self): + """When vmware customization is disabled via sys_cfg and + no meta data is found, log a message. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True, + 'datasource': {'OVF': {'allow_raw_data': True}}}, + distro={}, paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization using VMware config is disabled.', + self.logs.getvalue()) + + def test_get_data_allow_raw_data_disabled(self): + """When allow_raw_data is disabled via ds_cfg and + meta data is found, log a message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False, + 'datasource': {'OVF': {'allow_raw_data': False}}}, + distro={}, paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + util.write_file(metadata_file, "This is meta data") + retcode = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'transport_iso9660': NOT_FOUND, + 'transport_vmware_guestinfo': NOT_FOUND, + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']}, + ds.get_data) + self.assertFalse(retcode, 'Expected False return from ds.get_data') + self.assertIn( + 'DEBUG: Customization using raw data is disabled.', + self.logs.getvalue()) + + def test_get_data_vmware_customization_enabled(self): + """When cloud-init workflow for vmware is enabled via sys_cfg log a + message. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345345 + """) + util.write_file(conf_file, conf_content) + with mock.patch(MPATH + 'get_tools_config', return_value='true'): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + customscript = self.tmp_path('test-script', self.tdir) + self.assertIn('Script %s not found!!' % customscript, + str(context.exception)) + + def test_get_data_cust_script_disabled(self): + """If custom script is disabled by VMware tools configuration, + raise a RuntimeError. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """) + util.write_file(conf_file, conf_content) + # Prepare the custom sript + customscript = self.tmp_path('test-script', self.tdir) + util.write_file(customscript, "This is the post cust script") + + with mock.patch(MPATH + 'get_tools_config', return_value='invalid'): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(RuntimeError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + self.assertIn('Custom script is disabled by VM Administrator', + str(context.exception)) + + def test_get_data_cust_script_enabled(self): + """If custom script is enabled by VMware tools configuration, + execute the script. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + """) + util.write_file(conf_file, conf_content) + + # Mock custom script is enabled by return true when calling + # get_tools_config + with mock.patch(MPATH + 'get_tools_config', return_value="true"): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + # Verify custom script is trying to be executed + customscript = self.tmp_path('test-script', self.tdir) + self.assertIn('Script %s not found!!' 
% customscript, + str(context.exception)) + + def test_get_data_force_run_post_script_is_yes(self): + """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if + enable-custom-scripts is not defined in VM Tools configuration + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts + # default value is TRUE + conf_content = dedent("""\ + [CUSTOM-SCRIPT] + SCRIPT-NAME = test-script + [MISC] + MARKER-ID = 12345346 + DEFAULT-RUN-POST-CUST-SCRIPT = yes + """) + util.write_file(conf_file, conf_content) + + # Mock get_tools_config(section, key, defaultVal) to return + # defaultVal + def my_get_tools_config(*args, **kwargs): + return args[2] + + with mock.patch(MPATH + 'get_tools_config', + side_effect=my_get_tools_config): + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(CustomScriptNotFound) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + # Verify custom script still runs although it is + # disabled by VMware Tools + customscript = self.tmp_path('test-script', self.tdir) + self.assertIn('Script %s not found!!' % customscript, + str(context.exception)) + + def test_get_data_non_vmware_seed_platform_info(self): + """Platform info properly reports when on non-vmware platforms.""" + paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + # Write ovf-env.xml seed file + seed_dir = self.tmp_path('seed', dir=self.tdir) + ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) + util.write_file(ovf_env, OVF_ENV_CONTENT) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + + self.assertEqual('ovf', ds.cloud_name) + self.assertEqual('ovf', ds.platform_type) + with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'): + with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: + with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: + m_iso9660.return_value = NOT_FOUND + m_guestd.return_value = NOT_FOUND + self.assertTrue(ds.get_data()) + self.assertEqual( + 'ovf (%s/seed/ovf-env.xml)' % self.tdir, + ds.subplatform) + + def test_get_data_vmware_seed_platform_info(self): + """Platform info properly reports when on VMware platform.""" + paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + # Write ovf-env.xml seed file + seed_dir = self.tmp_path('seed', dir=self.tdir) + ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) + util.write_file(ovf_env, OVF_ENV_CONTENT) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + + self.assertEqual('ovf', ds.cloud_name) + self.assertEqual('ovf', ds.platform_type) + with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'): + with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: + with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: + m_iso9660.return_value = NOT_FOUND + m_guestd.return_value = NOT_FOUND + self.assertTrue(ds.get_data()) + self.assertEqual( + 'vmware (%s/seed/ovf-env.xml)' % self.tdir, + ds.subplatform) + + @mock.patch('cloudinit.subp.subp') + @mock.patch('cloudinit.sources.DataSource.persist_instance_data') + def test_get_data_vmware_guestinfo_with_network_config( + self, 
m_persist, m_subp + ): + self._test_get_data_with_network_config(guestinfo=True, iso=False) + + @mock.patch('cloudinit.subp.subp') + @mock.patch('cloudinit.sources.DataSource.persist_instance_data') + def test_get_data_iso9660_with_network_config(self, m_persist, m_subp): + self._test_get_data_with_network_config(guestinfo=False, iso=True) + + def _test_get_data_with_network_config(self, guestinfo, iso): + network_config = dedent("""\ + network: + version: 2 + ethernets: + nics: + nameservers: + addresses: + - 127.0.0.53 + search: + - vmware.com + match: + name: eth* + gateway4: 10.10.10.253 + dhcp4: false + addresses: + - 10.10.10.1/24 + """) + network_config_b64 = base64.b64encode(network_config.encode()).decode() + props = {"network-config": network_config_b64, + "password": "passw0rd", + "instance-id": "inst-001"} + env = fill_properties(props) + paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) + ds = self.datasource(sys_cfg={}, distro={}, paths=paths) + with mock.patch(MPATH + 'transport_vmware_guestinfo', + return_value=env if guestinfo else NOT_FOUND): + with mock.patch(MPATH + 'transport_iso9660', + return_value=env if iso else NOT_FOUND): + self.assertTrue(ds.get_data()) + self.assertEqual('inst-001', ds.metadata['instance-id']) + self.assertEqual( + {'version': 2, 'ethernets': + {'nics': + {'nameservers': + {'addresses': ['127.0.0.53'], + 'search': ['vmware.com']}, + 'match': {'name': 'eth*'}, + 'gateway4': '10.10.10.253', + 'dhcp4': False, + 'addresses': ['10.10.10.1/24']}}}, + ds.network_config) + + def test_get_data_cloudinit_metadata_json(self): + """Test metadata can be loaded to cloud-init metadata and network. + The metadata format is json. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + { + "instance-id": "cloud-vm", + "local-hostname": "my-host.domain.com", + "network": { + "version": 2, + "ethernets": { + "eths": { + "match": { + "name": "ens*" + }, + "dhcp4": true + } + } + } + } + """) + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) + self.assertEqual(2, ds.network_config['version']) + self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4']) + + def test_get_data_cloudinit_metadata_yaml(self): + """Test metadata can be loaded to cloud-init metadata and network. + The metadata format is yaml.
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) + self.assertEqual(2, ds.network_config['version']) + self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4']) + + def test_get_data_cloudinit_metadata_not_valid(self): + """Test metadata is not JSON or YAML format. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = "[This is not json or yaml format]a=b" + util.write_file(metadata_file, metadata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(YAMLError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [ + self.tdir + '/test-meta', '', '' + ], + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn("expected '', but found ''", + str(context.exception)) + + def test_get_data_cloudinit_metadata_not_found(self): + """Test metadata file can't be found. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + """) + util.write_file(conf_file, conf_content) + # Don't prepare the meta data file + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(FileNotFoundError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn('is not found', str(context.exception)) + + def test_get_data_cloudinit_userdata(self): + """Test user data can be loaded to cloud-init user data. 
+ """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': False}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + # Prepare the user data file + userdata_file = self.tmp_path('test-user', self.tdir) + userdata_content = "This is the user data" + util.write_file(userdata_file, userdata_content) + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + result = wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'collect_imc_file_paths': [self.tdir + '/test-meta', + self.tdir + '/test-user', ''], + 'get_nics_to_enable': ''}, + ds._get_data) + + self.assertTrue(result) + self.assertEqual("cloud-vm", ds.metadata['instance-id']) + self.assertEqual(userdata_content, ds.userdata_raw) + + def test_get_data_cloudinit_userdata_not_found(self): + """Test userdata file can't be found. + """ + paths = Paths({'cloud_dir': self.tdir}) + ds = self.datasource( + sys_cfg={'disable_vmware_customization': True}, distro={}, + paths=paths) + + # Prepare the conf file + conf_file = self.tmp_path('test-cust', self.tdir) + conf_content = dedent("""\ + [CLOUDINIT] + METADATA = test-meta + USERDATA = test-user + """) + util.write_file(conf_file, conf_content) + + # Prepare the meta data file + metadata_file = self.tmp_path('test-meta', self.tdir) + metadata_content = dedent("""\ + instance-id: cloud-vm + local-hostname: my-host.domain.com + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + """) + util.write_file(metadata_file, metadata_content) + + # Don't prepare the user data file + + with mock.patch(MPATH + 'set_customization_status', + return_value=('msg', b'')): + with self.assertRaises(FileNotFoundError) as context: + wrap_and_call( + 'cloudinit.sources.DataSourceOVF', + {'dmi.read_dmi_data': 'vmware', + 'util.del_dir': True, + 'search_file': self.tdir, + 'wait_for_imc_cfg_file': conf_file, + 'get_nics_to_enable': ''}, + ds.get_data) + + self.assertIn('is not found', str(context.exception)) + + +class TestTransportIso9660(CiTestCase): + + def setUp(self): + super(TestTransportIso9660, self).setUp() + self.add_patch('cloudinit.util.find_devs_with', + 'm_find_devs_with') + self.add_patch('cloudinit.util.mounts', 'm_mounts') + self.add_patch('cloudinit.util.mount_cb', 'm_mount_cb') + self.add_patch('cloudinit.sources.DataSourceOVF.get_ovf_env', + 'm_get_ovf_env') + self.m_get_ovf_env.return_value = ('myfile', 'mycontent') + + def test_find_already_mounted(self): + """Check we call get_ovf_env from on matching mounted devices""" + mounts = { + '/dev/sr9': { + 'fstype': 'iso9660', + 'mountpoint': 'wark/media/sr9', + 'opts': 'ro', + } + } + self.m_mounts.return_value = mounts + + self.assertEqual("mycontent", dsovf.transport_iso9660()) + + def test_find_already_mounted_skips_non_iso9660(self): + """Check we call get_ovf_env ignoring non iso9660""" + mounts = { + 
'/dev/xvdb': { + 'fstype': 'vfat', + 'mountpoint': 'wark/foobar', + 'opts': 'defaults,noatime', + }, + '/dev/xvdc': { + 'fstype': 'iso9660', + 'mountpoint': 'wark/media/sr9', + 'opts': 'ro', + } + } + # We use an OrderedDict here to ensure we check xvdb before xvdc + # as we're not mocking the regex matching, however, if we place + # an entry in the results then we can be reasonably sure that + # we're skipping an entry which fails to match. + self.m_mounts.return_value = ( + OrderedDict(sorted(mounts.items(), key=lambda t: t[0]))) + + self.assertEqual("mycontent", dsovf.transport_iso9660()) + + def test_find_already_mounted_matches_kname(self): + """Check we don't regex match on basename of the device""" + mounts = { + '/dev/foo/bar/xvdc': { + 'fstype': 'iso9660', + 'mountpoint': 'wark/media/sr9', + 'opts': 'ro', + } + } + # we're skipping an entry which fails to match. + self.m_mounts.return_value = mounts + + self.assertEqual(NOT_FOUND, dsovf.transport_iso9660()) + + def test_mount_cb_called_on_blkdevs_with_iso9660(self): + """Check we call mount_cb on blockdevs with iso9660 only""" + self.m_mounts.return_value = {} + self.m_find_devs_with.return_value = ['/dev/sr0'] + self.m_mount_cb.return_value = ("myfile", "mycontent") + + self.assertEqual("mycontent", dsovf.transport_iso9660()) + self.m_mount_cb.assert_called_with( + "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660") + + def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self): + """Check we call mount_cb on blockdevs with iso9660 and match regex""" + self.m_mounts.return_value = {} + self.m_find_devs_with.return_value = [ + '/dev/abc', '/dev/my-cdrom', '/dev/sr0'] + self.m_mount_cb.return_value = ("myfile", "mycontent") + + self.assertEqual("mycontent", dsovf.transport_iso9660()) + self.m_mount_cb.assert_called_with( + "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660") + + def test_mount_cb_not_called_no_matches(self): + """Check we don't call mount_cb if nothing matches""" + self.m_mounts.return_value = {} + self.m_find_devs_with.return_value = ['/dev/vg/myovf'] + + self.assertEqual(NOT_FOUND, dsovf.transport_iso9660()) + self.assertEqual(0, self.m_mount_cb.call_count) + + def test_mount_cb_called_require_iso_false(self): + """Check we call mount_cb on blockdevs with require_iso=False""" + self.m_mounts.return_value = {} + self.m_find_devs_with.return_value = ['/dev/xvdz'] + self.m_mount_cb.return_value = ("myfile", "mycontent") + + self.assertEqual( + "mycontent", dsovf.transport_iso9660(require_iso=False)) + + self.m_mount_cb.assert_called_with( + "/dev/xvdz", dsovf.get_ovf_env, mtype=None) + + def test_maybe_cdrom_device_none(self): + """Test maybe_cdrom_device returns False for none/empty input""" + self.assertFalse(dsovf.maybe_cdrom_device(None)) + self.assertFalse(dsovf.maybe_cdrom_device('')) + + def test_maybe_cdrom_device_non_string_exception(self): + """Test maybe_cdrom_device raises ValueError on non-string types""" + with self.assertRaises(ValueError): + dsovf.maybe_cdrom_device({'a': 'eleven'}) + + def test_maybe_cdrom_device_false_on_multi_dir_paths(self): + """Test maybe_cdrom_device is false on /dev[/.*]/* paths""" + self.assertFalse(dsovf.maybe_cdrom_device('/dev/foo/sr0')) + self.assertFalse(dsovf.maybe_cdrom_device('foo/sr0')) + self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0')) + self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0')) + + def test_maybe_cdrom_device_true_on_hd_partitions(self): + """Test maybe_cdrom_device is true on /dev/hd[a-z][0-9]+ paths"""
self.assertTrue(dsovf.maybe_cdrom_device('/dev/hda1')) + self.assertTrue(dsovf.maybe_cdrom_device('hdz9')) + + def test_maybe_cdrom_device_true_on_valid_relative_paths(self): + """Test maybe_cdrom_device normalizes paths""" + self.assertTrue(dsovf.maybe_cdrom_device('/dev/wark/../sr9')) + self.assertTrue(dsovf.maybe_cdrom_device('///sr0')) + self.assertTrue(dsovf.maybe_cdrom_device('/sr0')) + self.assertTrue(dsovf.maybe_cdrom_device('//dev//hda')) + + def test_maybe_cdrom_device_true_on_xvd_partitions(self): + """Test maybe_cdrom_device returns true on xvd*""" + self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda')) + self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda1')) + self.assertTrue(dsovf.maybe_cdrom_device('xvdza1')) + + +@mock.patch(MPATH + "subp.which") +@mock.patch(MPATH + "subp.subp") +class TestTransportVmwareGuestinfo(CiTestCase): + """Test the com.vmware.guestInfo transport implemented in + transport_vmware_guestinfo.""" + + rpctool = 'vmware-rpctool' + with_logs = True + rpctool_path = '/not/important/vmware-rpctool' + + def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which): + m_which.return_value = None + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(0, m_subp.call_count, + "subp should not be called if no rpctool in path.") + + def test_notfound_on_exit_code_1(self, m_subp, m_which): + """If vmware-rpctool exits 1, it must return not found.""" + m_which.return_value = self.rpctool_path + m_subp.side_effect = subp.ProcessExecutionError( + stdout="", stderr="No value found", exit_code=1, cmd=["unused"]) + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + self.assertNotIn("WARNING", self.logs.getvalue(), + "exit code of 1 by rpctool should not cause warning.") + + def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which): + """If vmware-rpctool exits 0 with no stdout, that is a normal not-found. + + This isn't actually a case I've seen. Normally, on "not found", + rpctool would exit 1 with 'No value found' on stderr. But cover + the case where it exited 0 and just wrote nothing to stdout.
+ """ + m_which.return_value = self.rpctool_path + m_subp.return_value = ('', '') + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + + def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which): + """If vmware-rpctool exits non zero or 1, warnings should be logged.""" + m_which.return_value = self.rpctool_path + m_subp.side_effect = subp.ProcessExecutionError( + stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]) + self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + self.assertIn("WARNING", self.logs.getvalue(), + "exit code of 2 by rpctool should log WARNING.") + + def test_found_when_guestinfo_present(self, m_subp, m_which): + """When there is a ovf info, transport should return it.""" + m_which.return_value = self.rpctool_path + content = fill_properties({}) + m_subp.return_value = (content, '') + self.assertEqual(content, dsovf.transport_vmware_guestinfo()) + self.assertEqual(1, m_subp.call_count) + +# +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_rbx.py b/tests/unittests/sources/test_rbx.py new file mode 100644 index 00000000..c1294c92 --- /dev/null +++ b/tests/unittests/sources/test_rbx.py @@ -0,0 +1,238 @@ +import json + +from cloudinit import helpers +from cloudinit import distros +from cloudinit.sources import DataSourceRbxCloud as ds +from tests.unittests.helpers import mock, CiTestCase, populate_dir +from cloudinit import subp + +DS_PATH = "cloudinit.sources.DataSourceRbxCloud" + +CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ + "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ + "tToyGP41.s1" + +CLOUD_METADATA = { + "vm": { + "memory": 4, + "cpu": 2, + "name": "vm-image-builder", + "_id": "5beab44f680cffd11f0e60fc" + }, + "additionalMetadata": { + "username": "guru", + "sshKeys": ["ssh-rsa ..."], + "password": { + "sha512": CRYPTO_PASS + } + }, + "disk": [ + {"size": 10, "type": "ssd", + "name": "vm-image-builder-os", + "_id": "5beab450680cffd11f0e60fe"}, + {"size": 2, "type": "ssd", + "name": "ubuntu-1804-bionic", + "_id": "5bef002c680cffd11f107590"} + ], + "netadp": [ + { + "ip": [{"address": "62.181.8.174"}], + "network": { + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "routing": [], + "gateway": "62.181.8.1", + "netmask": "255.255.248.0", + "name": "public", + "type": "public", + "_id": "5784e97be2627505227b578c" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:03", + "_id": "5beab450680cffd11f0e6102" + }, + { + "ip": [{"address": "10.209.78.11"}], + "network": { + "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, + "routing": [], + "gateway": "10.209.78.1", + "netmask": "255.255.255.0", + "name": "network-determined-bardeen", + "type": "private", + "_id": "5beaec64680cffd11f0e7c31" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:24", + "_id": "5bec18c6680cffd11f0f0d8b" + } + ], + "dvddrive": [{"iso": {}}] +} + + +class TestRbxDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def setUp(self): + super(TestRbxDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp} + ) + + # defaults for few tests + self.ds = ds.DataSourceRbxCloud + self.seed_dir = self.paths.seed_dir + self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}} + + def 
test_seed_read_user_data_callback_empty_file(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, {}) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertIsNone(results) + + def test_seed_read_user_data_callback_valid_disk(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertTrue('metadata' in results) + self.assertTrue('cfg' in results) + + def test_seed_read_user_data_callback_userdata(self): + userdata = "#!/bin/sh\nexit 1" + populate_user_metadata(self.seed_dir, userdata) + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertEqual(results['userdata'], userdata) + + def test_generate_network_config(self): + expected = { + 'version': 1, + 'config': [ + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 'netmask': '255.255.248.0', + 'address': '62.181.8.174', + 'type': 'static', 'gateway': '62.181.8.1'} + ], + 'type': 'physical', + 'name': 'eth0', + 'mac_address': '00:15:5d:ff:0f:03' + }, + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['9.9.9.9', '8.8.8.8'], + 'netmask': '255.255.255.0', + 'address': '10.209.78.11', + 'type': 'static', + 'gateway': '10.209.78.1'} + ], + 'type': 'physical', + 'name': 'eth1', + 'mac_address': '00:15:5d:ff:0f:24' + } + ] + } + self.assertEqual( + ds.generate_network_config(CLOUD_METADATA['netadp']), + expected + ) + + @mock.patch(DS_PATH + '.subp.subp') + def test_gratuitous_arp_run_standard_arping(self, m_subp): + """Test that handle runs arping with the expected parameters.""" + items = [ + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104' + }, + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104', + }, + ] + ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) + self.assertEqual([ + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]), + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]) + ], m_subp.call_args_list + ) + + @mock.patch(DS_PATH + '.subp.subp') + def test_handle_rhel_like_arping(self, m_subp): + """Test handle on RHEL-like distros.""" + items = [ + { + 'source': '172.16.6.104', + 'destination': '172.17.0.2', + } + ] + ds.gratuitous_arp(items, self._fetch_distro('fedora')) + self.assertEqual([ + mock.call( + ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2'] + )], + m_subp.call_args_list + ) + + @mock.patch( + DS_PATH + '.subp.subp', + side_effect=subp.ProcessExecutionError() + ) + def test_continue_on_arping_error(self, m_subp): + """Continue when the arping command errors""" + items = [ + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104' + }, + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104', + }, + ] + ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) + self.assertEqual([ + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]), + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]) + ], m_subp.call_args_list + ) + + +def populate_cloud_metadata(path, data): + populate_dir(path, {'cloud.json': json.dumps(data)}) + + +def populate_user_metadata(path, data): + populate_dir(path, {'user.data': data}) diff --git a/tests/unittests/sources/test_scaleway.py
b/tests/unittests/sources/test_scaleway.py new file mode 100644 index 00000000..33ae26b8 --- /dev/null +++ b/tests/unittests/sources/test_scaleway.py @@ -0,0 +1,473 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +import httpretty +import requests + +from cloudinit import helpers +from cloudinit import settings +from cloudinit import sources +from cloudinit.sources import DataSourceScaleway + +from tests.unittests.helpers import mock, HttprettyTestCase, CiTestCase + + +class DataResponses(object): + """ + Possible responses of the API endpoint + 169.254.42.42/user_data/cloud-init and + 169.254.42.42/vendor_data/cloud-init. + """ + + FAKE_USER_DATA = '#!/bin/bash\necho "user-data"' + + @staticmethod + def rate_limited(method, uri, headers): + return 429, headers, '' + + @staticmethod + def api_error(method, uri, headers): + return 500, headers, '' + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, cls.FAKE_USER_DATA + + @staticmethod + def empty(method, uri, headers): + """ + No user data for this server. + """ + return 404, headers, '' + + +class MetadataResponses(object): + """ + Possible responses of the metadata API. + """ + + FAKE_METADATA = { + 'id': '00000000-0000-0000-0000-000000000000', + 'hostname': 'scaleway.host', + 'tags': [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ], + 'ssh_public_keys': [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + } + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, json.dumps(cls.FAKE_METADATA) + + +class TestOnScaleway(CiTestCase): + + def setUp(self): + super(TestOnScaleway, self).setUp() + self.tmp = self.tmp_dir() + + def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): + mock, faked = fake_dmi + mock.return_value = 'Scaleway' if faked else 'Whatever' + + mock, faked = fake_file_exists + mock.return_value = faked + + mock, faked = fake_cmdline + mock.return_value = \ + 'initrd=initrd showopts scaleway nousb' if faked \ + else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic' + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertFalse(DataSourceScaleway.on_scaleway()) + + # When not on Scaleway, get_data() returns False. + datasource = DataSourceScaleway.DataSourceScaleway( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}) + ) + self.assertFalse(datasource.get_data()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + dmidecode returns "Scaleway". 
+ """ + # dmidecode returns "Scaleway" + self.install_mocks( + fake_dmi=(m_read_dmi_data, True), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + /var/run/scaleway exists. + """ + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, True), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.dmi.read_dmi_data') + def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + "scaleway" in /proc/cmdline. + """ + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, True) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + +def get_source_address_adapter(*args, **kwargs): + """ + Scaleway user/vendor data API requires to be called with a privileged port. + + If the unittests are run as non-root, the user doesn't have the permission + to bind on ports below 1024. + + This function removes the bind on a privileged address, since anyway the + HTTP call is mocked by httpretty. + """ + kwargs.pop('source_address') + return requests.adapters.HTTPAdapter(*args, **kwargs) + + +class TestDataSourceScaleway(HttprettyTestCase): + + def setUp(self): + tmp = self.tmp_dir() + self.datasource = DataSourceScaleway.DataSourceScaleway( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp}) + ) + super(TestDataSourceScaleway, self).setUp() + + self.metadata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url'] + self.userdata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url'] + self.vendordata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] + + self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway', + '_m_on_scaleway', return_value=True) + self.add_patch( + 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic', + '_m_find_fallback_nic', return_value='scalewaynic0') + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4): + """ + get_data() returns metadata, user data and vendor data. 
+ """ + m_get_cmdline.return_value = 'scaleway' + + # Make user data API return a valid response + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.userdata_url, + body=DataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.get_ok) + self.datasource.get_data() + + self.assertEqual(self.datasource.get_instance_id(), + MetadataResponses.FAKE_METADATA['id']) + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + self.assertEqual(self.datasource.get_hostname(), + MetadataResponses.FAKE_METADATA['hostname']) + self.assertEqual(self.datasource.get_userdata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertEqual(self.datasource.get_vendordata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertIsNone(self.datasource.availability_zone) + self.assertIsNone(self.datasource.region) + self.assertEqual(sleep.call_count, 0) + + def test_ssh_keys_empty(self): + """ + get_public_ssh_keys() should return empty list if no ssh key are + available + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys(), []) + + def test_ssh_keys_only_tags(self): + """ + get_public_ssh_keys() should return list of keys available in tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC", + ] + self.datasource.metadata['ssh_public_keys'] = [] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + ].sort()) + + def test_ssh_keys_only_conf(self): + """ + get_public_ssh_keys() should return list of keys available in + ssh_public_keys field + """ + self.datasource.metadata['tags'] = [] + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... login2 (RSA)' + }] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + + def test_ssh_keys_both(self): + """ + get_public_ssh_keys() should return a merge of keys available + in ssh_public_keys and tags + """ + self.datasource.metadata['tags'] = [ + "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", + ] + + self.datasource.metadata['ssh_public_keys'] = [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... 
login2 (RSA)' + }] + self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', + 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + ].sort()) + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4): + """ + get_data() returns metadata, but no user data nor vendor data. + """ + m_get_cmdline.return_value = 'scaleway' + + # Make user and vendor data APIs return HTTP/404, which means there is + # no user / vendor data for the server. + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.userdata_url, + body=DataResponses.empty) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.empty) + self.datasource.get_data() + self.assertIsNone(self.datasource.get_userdata_raw()) + self.assertIsNone(self.datasource.get_vendordata_raw()) + self.assertEqual(sleep.call_count, 0) + + @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4): + """ + get_data() is rate limited two times by the metadata API when fetching + user data. + """ + m_get_cmdline.return_value = 'scaleway' + + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.empty) + + httpretty.register_uri( + httpretty.GET, self.userdata_url, + responses=[ + httpretty.Response(body=DataResponses.rate_limited), + httpretty.Response(body=DataResponses.rate_limited), + httpretty.Response(body=DataResponses.get_ok), + ] + ) + self.datasource.get_data() + self.assertEqual(self.datasource.get_userdata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertEqual(sleep.call_count, 2) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4 config if no ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + + netcfg = self.datasource.network_config + resp = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}] + } + ] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): + """ + network_config will only generate IPv4/v6 configs if ipv6 data is + available in the metadata + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = { + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', + } + + netcfg = self.datasource.network_config + resp = { + 'version': 1, + 'config': [ + { + 'type': 
'physical', + 'name': 'ens2', + 'subnets': [ + { + 'type': 'dhcp4' + }, + { + 'type': 'static', + 'address': '2000:abc:4444:9876::42:999', + 'gateway': '2000:abc:4444:9876::42:000', + 'netmask': '127', + } + ] + } + ] + } + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_existing(self, m_get_cmdline, fallback_nic): + """ + network_config() should return the same data if a network config + already exists + """ + m_get_cmdline.return_value = 'scaleway' + self.datasource._network_config = '0xdeadbeef' + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, '0xdeadbeef') + + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_unset(self, m_get_cmdline, fallback_nic): + """ + _network_config will be set to sources.UNSET after the first boot. + Make sure it behave correctly. + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = sources.UNSET + + resp = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}] + } + ] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + + @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning') + @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') + @mock.patch('cloudinit.util.get_cmdline') + def test_network_config_cached_none(self, m_get_cmdline, fallback_nic, + logwarning): + """ + network_config() should return config data if cached data is None + rather than sources.UNSET + """ + m_get_cmdline.return_value = 'scaleway' + fallback_nic.return_value = 'ens2' + self.datasource.metadata['ipv6'] = None + self.datasource._network_config = None + + resp = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'ens2', + 'subnets': [{'type': 'dhcp4'}] + } + ] + } + + netcfg = self.datasource.network_config + self.assertEqual(netcfg, resp) + logwarning.assert_called_with('Found None as cached _network_config. ' + 'Resetting to %s', sources.UNSET) diff --git a/tests/unittests/sources/test_smartos.py b/tests/unittests/sources/test_smartos.py new file mode 100644 index 00000000..e306eded --- /dev/null +++ b/tests/unittests/sources/test_smartos.py @@ -0,0 +1,1163 @@ +# Copyright (C) 2013 Canonical Ltd. +# Copyright 2019 Joyent, Inc. +# +# Author: Ben Howard +# +# This file is part of cloud-init. See LICENSE file for license information. + +'''This is a testcase for the SmartOS datasource. + +It replicates a serial console and acts like the SmartOS console does in +order to validate return responses. 
+ +''' + +from binascii import crc32 +import json +import multiprocessing +import os +import os.path +import re +import signal +import stat +import unittest +import uuid + +from cloudinit import serial +from cloudinit.sources import DataSourceSmartOS +from cloudinit.sources.DataSourceSmartOS import ( + convert_smartos_network_data as convert_net, + SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, + identify_file) +from cloudinit.event import EventScope, EventType + +from cloudinit import helpers as c_helpers +from cloudinit.util import (b64e, write_file) +from cloudinit.subp import (subp, ProcessExecutionError, which) + +from tests.unittests.helpers import ( + CiTestCase, mock, FilesystemMockingTestCase, skipIf) + + +try: + import serial as _pyserial + assert _pyserial # avoid pyflakes error F401: import unused + HAS_PYSERIAL = True +except ImportError: + HAS_PYSERIAL = False + +DSMOS = 'cloudinit.sources.DataSourceSmartOS' +SDC_NICS = json.loads(""" +[ + { + "nic_tag": "external", + "primary": true, + "mtu": 1500, + "model": "virtio", + "gateway": "8.12.42.1", + "netmask": "255.255.255.0", + "ip": "8.12.42.102", + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "gateways": [ + "8.12.42.1" + ], + "vlan_id": 324, + "mac": "90:b8:d0:f5:e4:f5", + "interface": "net0", + "ips": [ + "8.12.42.102/24" + ] + }, + { + "nic_tag": "sdc_overlay/16187209", + "gateway": "192.168.128.1", + "model": "virtio", + "mac": "90:b8:d0:a5:ff:cd", + "netmask": "255.255.252.0", + "ip": "192.168.128.93", + "network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9", + "gateways": [ + "192.168.128.1" + ], + "vlan_id": 2, + "mtu": 8500, + "interface": "net1", + "ips": [ + "192.168.128.93/22" + ] + } +] +""") + + +SDC_NICS_ALT = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_DHCP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "dhcp" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "8.12.42.51/24", + "8.12.42.52/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + 
"mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24", + "10.210.1.151/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_MIP_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": [ + "8.12.42.1" + ], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": [ + "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64", + "8.12.42.51/24" + ], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": [ + "10.210.1.217/24" + ], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_IPV4_IPV6 = json.loads(""" +[ + { + "interface": "net0", + "mac": "90:b8:d0:ae:64:51", + "vlan_id": 324, + "nic_tag": "external", + "gateway": "8.12.42.1", + "gateways": ["8.12.42.1", "2001::1", "2001::2"], + "netmask": "255.255.255.0", + "ip": "8.12.42.51", + "ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64", + "8.12.42.52/32"], + "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model": "virtio", + "mtu": 1500, + "primary": true + }, + { + "interface": "net1", + "mac": "90:b8:d0:bd:4f:9c", + "vlan_id": 600, + "nic_tag": "internal", + "netmask": "255.255.255.0", + "ip": "10.210.1.217", + "ips": ["10.210.1.217/24"], + "gateways": ["10.210.1.210"], + "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model": "virtio", + "mtu": 1500 + } +] +""") + +SDC_NICS_SINGLE_GATEWAY = json.loads(""" +[ + { + "interface":"net0", + "mac":"90:b8:d0:d8:82:b4", + "vlan_id":324, + "nic_tag":"external", + "gateway":"8.12.42.1", + "gateways":["8.12.42.1"], + "netmask":"255.255.255.0", + "ip":"8.12.42.26", + "ips":["8.12.42.26/24"], + "network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", + "model":"virtio", + "mtu":1500, + "primary":true + }, + { + "interface":"net1", + "mac":"90:b8:d0:0a:51:31", + "vlan_id":600, + "nic_tag":"internal", + "netmask":"255.255.255.0", + "ip":"10.210.1.27", + "ips":["10.210.1.27/24"], + "network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", + "model":"virtio", + "mtu":1500 + } +] +""") + + +MOCK_RETURNS = { + 'hostname': 'test-host', + 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', + 'disable_iptables_flag': None, + 'enable_motd_sys_info': None, + 'test-var1': 'some data', + 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), + 'sdc:datacenter_name': 'somewhere2', + 'sdc:operator-script': '\n'.join(['bin/true', '']), + 'sdc:uuid': str(uuid.uuid4()), + 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']), + 'user-data': '\n'.join(['something', '']), + 'user-script': '\n'.join(['/bin/true', '']), + 'sdc:nics': json.dumps(SDC_NICS), +} + +DMI_DATA_RETURN = 'smartdc' + +# Useful for calculating the length of a frame body. A SUCCESS body will be +# followed by more characters or be one character less if SUCCESS with no +# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html. 
+SUCCESS_LEN = len('0123abcd SUCCESS ') +NOTFOUND_LEN = len('0123abcd NOTFOUND') + + +class PsuedoJoyentClient(object): + def __init__(self, data=None): + if data is None: + data = MOCK_RETURNS.copy() + self.data = data + self._is_open = False + return + + def get(self, key, default=None, strip=False): + if key in self.data: + r = self.data[key] + if strip: + r = r.strip() + else: + r = default + return r + + def get_json(self, key, default=None): + result = self.get(key, default=default) + if result is None: + return default + return json.loads(result) + + def exists(self): + return True + + def open_transport(self): + assert(not self._is_open) + self._is_open = True + + def close_transport(self): + assert(self._is_open) + self._is_open = False + + +class TestSmartOSDataSource(FilesystemMockingTestCase): + jmc_cfact = None + get_smartos_environ = None + + def setUp(self): + super(TestSmartOSDataSource, self).setUp() + + self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ") + self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact") + self.legacy_user_d = self.tmp_path('legacy_user_tmp') + os.mkdir(self.legacy_user_d) + self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d", + autospec=False, new=self.legacy_user_d) + self.add_patch(DSMOS + ".identify_file", "m_identify_file", + return_value="text/plain") + + def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM, + sys_cfg=None, ds_cfg=None): + self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata) + self.get_smartos_environ.return_value = mode + + tmpd = self.tmp_dir() + dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), + 'run_dir': self.tmp_path('run_dir')} + for d in dirs.values(): + os.mkdir(d) + paths = c_helpers.Paths(dirs) + + if sys_cfg is None: + sys_cfg = {} + + if ds_cfg is not None: + sys_cfg['datasource'] = sys_cfg.get('datasource', {}) + sys_cfg['datasource']['SmartOS'] = ds_cfg + + return DataSourceSmartOS.DataSourceSmartOS( + sys_cfg, distro=None, paths=paths) + + def test_no_base64(self): + ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} + dsrc = self._get_ds(ds_cfg=ds_cfg) + ret = dsrc.get_data() + self.assertTrue(ret) + + def test_uuid(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['sdc:uuid'], + dsrc.metadata['instance-id']) + + def test_platform_info(self): + """All platform-related attributes are properly set.""" + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + self.assertEqual('joyent', dsrc.cloud_name) + self.assertEqual('joyent', dsrc.platform_type) + self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform) + + def test_root_keys(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['root_authorized_keys'], + dsrc.metadata['public-keys']) + + def test_hostname_b64(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + + def test_hostname(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['hostname'], + dsrc.metadata['local-hostname']) + + def test_hostname_if_no_sdc_hostname(self): + my_returns = MOCK_RETURNS.copy() + my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + 
self.assertEqual(my_returns['hostname'],
+                         dsrc.metadata['local-hostname'])
+
+    def test_sdc_hostname_if_no_hostname(self):
+        my_returns = MOCK_RETURNS.copy()
+        my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
+        del my_returns['hostname']
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(my_returns['sdc:hostname'],
+                         dsrc.metadata['local-hostname'])
+
+    def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
+        my_returns = MOCK_RETURNS.copy()
+        del my_returns['hostname']
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(my_returns['sdc:uuid'],
+                         dsrc.metadata['local-hostname'])
+
+    def test_userdata(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(MOCK_RETURNS['user-data'],
+                         dsrc.metadata['legacy-user-data'])
+        self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
+                         dsrc.userdata_raw)
+
+    def test_sdc_nics(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
+                         dsrc.metadata['network-data'])
+
+    def test_sdc_scripts(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(MOCK_RETURNS['user-script'],
+                         dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        print("legacy_script_f=%s" % legacy_script_f)
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+        self.assertEqual(user_script_perm, '700')
+
+    def test_scripts_shebanged(self):
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(MOCK_RETURNS['user-script'],
+                         dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        shebang = None
+        with open(legacy_script_f, 'r') as f:
+            shebang = f.readlines()[0].strip()
+        self.assertEqual(shebang, "#!/bin/bash")
+        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
+        self.assertEqual(user_script_perm, '700')
+
+    def test_scripts_shebang_not_added(self):
+        """
+        Test that scripts which already carry file magic (here a perl
+        shebang) are written through unchanged: cloud-init should only
+        add a shebang to plain-text scripts that lack one, since SmartOS
+        requires user-scripts to be executable.
+        """
+
+        my_returns = MOCK_RETURNS.copy()
+        my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
+                                               'print("hi")', ''])
+
+        dsrc = self._get_ds(mockdata=my_returns)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(my_returns['user-script'],
+                         dsrc.metadata['user-script'])
+
+        legacy_script_f = "%s/user-script" % self.legacy_user_d
+        self.assertTrue(os.path.exists(legacy_script_f))
+        self.assertTrue(os.path.islink(legacy_script_f))
+        shebang = None
+        with open(legacy_script_f, 'r') as f:
+            shebang = f.readlines()[0].strip()
+        self.assertEqual(shebang, "#!/usr/bin/perl")
+
+    def test_userdata_removed(self):
+        """
+        User-data in the SmartOS world is supposed to be written to a file
+        on each and every boot. This test makes sure that when the legacy
+        user-data key is removed, the existing user-data is backed up
+        and there is no /var/db/user-data left.
+ """ + + user_data_f = "%s/mdata-user-data" % self.legacy_user_d + with open(user_data_f, 'w') as f: + f.write("PREVIOUS") + + my_returns = MOCK_RETURNS.copy() + del my_returns['user-data'] + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertFalse(dsrc.metadata.get('legacy-user-data')) + + found_new = False + for root, _dirs, files in os.walk(self.legacy_user_d): + for name in files: + name_f = os.path.join(root, name) + permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:] + if re.match(r'.*\/mdata-user-data$', name_f): + found_new = True + print(name_f) + self.assertEqual(permissions, '400') + + self.assertFalse(found_new) + + def test_vendor_data_not_default(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['sdc:vendor-data'], + dsrc.metadata['vendor-data']) + + def test_default_vendor_data(self): + my_returns = MOCK_RETURNS.copy() + def_op_script = my_returns['sdc:vendor-data'] + del my_returns['sdc:vendor-data'] + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data']) + + # we expect default vendor-data is a boothook + self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook")) + + def test_disable_iptables_flag(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['disable_iptables_flag'], + dsrc.metadata['iptables_disable']) + + def test_motd_sys_info(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'], + dsrc.metadata['motd_sys_info']) + + def test_default_ephemeral(self): + # Test to make sure that the builtin config has the ephemeral + # configuration. 
+        dsrc = self._get_ds()
+        cfg = dsrc.get_config_obj()
+
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+
+        assert 'disk_setup' in cfg
+        assert 'fs_setup' in cfg
+        self.assertIsInstance(cfg['disk_setup'], dict)
+        self.assertIsInstance(cfg['fs_setup'], list)
+
+    def test_override_disk_aliases(self):
+        # Test to make sure that the built-in DS config is overridden
+        builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
+
+        mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
+
+        # expect that these values are in builtin, or this is pointless
+        for k in mydscfg:
+            self.assertIn(k, builtin)
+
+        dsrc = self._get_ds(ds_cfg=mydscfg)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+
+        self.assertEqual(mydscfg['disk_aliases']['FOO'],
+                         dsrc.ds_cfg['disk_aliases']['FOO'])
+
+        self.assertEqual(dsrc.device_name_to_device('FOO'),
+                         mydscfg['disk_aliases']['FOO'])
+
+    def test_reconfig_network_on_boot(self):
+        # Test to ensure that network is configured from metadata on each boot
+        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+        self.assertSetEqual(
+            {EventType.BOOT_NEW_INSTANCE,
+             EventType.BOOT,
+             EventType.BOOT_LEGACY},
+            dsrc.default_update_events[EventScope.NETWORK]
+        )
+
+
+class TestIdentifyFile(CiTestCase):
+    """Test the 'identify_file' utility."""
+    @skipIf(not which("file"), "command 'file' not available.")
+    def test_file_happy_path(self):
+        """Test file is available and functional on plain text."""
+        fname = self.tmp_path("myfile")
+        write_file(fname, "plain text content here\n")
+        with self.allow_subp(["file"]):
+            self.assertEqual("text/plain", identify_file(fname))
+
+    @mock.patch(DSMOS + ".subp.subp")
+    def test_returns_none_on_error(self, m_subp):
+        """On 'file' execution error, None should be returned."""
+        m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99)
+        fname = self.tmp_path("myfile")
+        write_file(fname, "plain text content here\n")
+        self.assertEqual(None, identify_file(fname))
+        self.assertEqual(
+            [mock.call(["file", "--brief", "--mime-type", fname])],
+            m_subp.call_args_list)
+
+
+class ShortReader(object):
+    """Implements a 'read' interface for the bytes provided,
+    much like io.BytesIO, but the 'endbyte' acts as EOF.
+ When it is reached a short will be returned.""" + def __init__(self, initial_bytes, endbyte=b'\0'): + self.data = initial_bytes + self.index = 0 + self.len = len(self.data) + self.endbyte = endbyte + + @property + def emptied(self): + return self.index >= self.len + + def read(self, size=-1): + """Read size bytes but not past a null.""" + if size == 0 or self.index >= self.len: + return b'' + + rsize = size + if size < 0 or size + self.index > self.len: + rsize = self.len - self.index + + next_null = self.data.find(self.endbyte, self.index, rsize) + if next_null >= 0: + rsize = next_null - self.index + 1 + i = self.index + self.index += rsize + ret = self.data[i:i + rsize] + if len(ret) and ret[-1:] == self.endbyte: + ret = ret[:-1] + return ret + + +class TestJoyentMetadataClient(FilesystemMockingTestCase): + + invalid = b'invalid command\n' + failure = b'FAILURE\n' + v2_ok = b'V2_OK\n' + + def setUp(self): + super(TestJoyentMetadataClient, self).setUp() + + self.serial = mock.MagicMock(spec=serial.Serial) + self.request_id = 0xabcdef12 + self.metadata_value = 'value' + self.response_parts = { + 'command': 'SUCCESS', + 'crc': 'b5a9ff00', + 'length': SUCCESS_LEN + len(b64e(self.metadata_value)), + 'payload': b64e(self.metadata_value), + 'request_id': '{0:08x}'.format(self.request_id), + } + + def make_response(): + payloadstr = '' + if 'payload' in self.response_parts: + payloadstr = ' {0}'.format(self.response_parts['payload']) + return ('V2 {length} {crc} {request_id} ' + '{command}{payloadstr}\n'.format( + payloadstr=payloadstr, + **self.response_parts).encode('ascii')) + + self.metasource_data = None + + def read_response(length): + if not self.metasource_data: + self.metasource_data = make_response() + self.metasource_data_len = len(self.metasource_data) + resp = self.metasource_data[:length] + self.metasource_data = self.metasource_data[length:] + return resp + + self.serial.read.side_effect = read_response + self.patched_funcs.enter_context( + mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint', + mock.Mock(return_value=self.request_id))) + + def _get_client(self): + return DataSourceSmartOS.JoyentMetadataClient( + fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM) + + def _get_serial_client(self): + self.serial.timeout = 1 + return DataSourceSmartOS.JoyentMetadataSerialClient(None, + fp=self.serial) + + def assertEndsWith(self, haystack, prefix): + self.assertTrue(haystack.endswith(prefix), + "{0} does not end with '{1}'".format( + repr(haystack), prefix)) + + def assertStartsWith(self, haystack, prefix): + self.assertTrue(haystack.startswith(prefix), + "{0} does not start with '{1}'".format( + repr(haystack), prefix)) + + def assertNoMoreSideEffects(self, obj): + self.assertRaises(StopIteration, obj) + + def test_get_metadata_writes_a_single_line(self): + client = self._get_client() + client.get('some_key') + self.assertEqual(1, self.serial.write.call_count) + written_line = self.serial.write.call_args[0][0] + self.assertEndsWith(written_line.decode('ascii'), + b'\n'.decode('ascii')) + self.assertEqual(1, written_line.count(b'\n')) + + def _get_written_line(self, key='some_key'): + client = self._get_client() + client.get(key) + return self.serial.write.call_args[0][0] + + def test_get_metadata_writes_bytes(self): + self.assertIsInstance(self._get_written_line(), bytes) + + def test_get_metadata_line_starts_with_v2(self): + foo = self._get_written_line() + self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii')) + + def 
test_get_metadata_uses_get_command(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + self.assertEqual('GET', parts[4]) + + def test_get_metadata_base64_encodes_argument(self): + key = 'my_key' + parts = self._get_written_line(key).decode('ascii').strip().split(' ') + self.assertEqual(b64e(key), parts[5]) + + def test_get_metadata_calculates_length_correctly(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + expected_length = len(' '.join(parts[3:])) + self.assertEqual(expected_length, int(parts[1])) + + def test_get_metadata_uses_appropriate_request_id(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + request_id = parts[3] + self.assertEqual(8, len(request_id)) + self.assertEqual(request_id, request_id.lower()) + + def test_get_metadata_uses_random_number_for_request_id(self): + line = self._get_written_line() + request_id = line.decode('ascii').strip().split(' ')[3] + self.assertEqual('{0:08x}'.format(self.request_id), request_id) + + def test_get_metadata_checksums_correctly(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + expected_checksum = '{0:08x}'.format( + crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff) + checksum = parts[2] + self.assertEqual(expected_checksum, checksum) + + def test_get_metadata_reads_a_line(self): + client = self._get_client() + client.get('some_key') + self.assertEqual(self.metasource_data_len, self.serial.read.call_count) + + def test_get_metadata_returns_valid_value(self): + client = self._get_client() + value = client.get('some_key') + self.assertEqual(self.metadata_value, value) + + def test_get_metadata_throws_exception_for_incorrect_length(self): + self.response_parts['length'] = 0 + client = self._get_client() + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get, 'some_key') + + def test_get_metadata_throws_exception_for_incorrect_crc(self): + self.response_parts['crc'] = 'deadbeef' + client = self._get_client() + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get, 'some_key') + + def test_get_metadata_throws_exception_for_request_id_mismatch(self): + self.response_parts['request_id'] = 'deadbeef' + client = self._get_client() + client._checksum = lambda _: self.response_parts['crc'] + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get, 'some_key') + + def test_get_metadata_returns_None_if_value_not_found(self): + self.response_parts['payload'] = '' + self.response_parts['command'] = 'NOTFOUND' + self.response_parts['length'] = NOTFOUND_LEN + client = self._get_client() + client._checksum = lambda _: self.response_parts['crc'] + self.assertIsNone(client.get('some_key')) + + def test_negotiate(self): + client = self._get_client() + reader = ShortReader(self.v2_ok) + client.fp.read.side_effect = reader.read + client._negotiate() + self.assertTrue(reader.emptied) + + def test_negotiate_short_response(self): + client = self._get_client() + # chopped '\n' from v2_ok. 
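+        # ShortReader stops each read() at its endbyte (b'\0' here), so the
+        # client sees 'V2_OK' with no terminating newline and should raise
+        # the timeout exception asserted below while waiting for the rest
+        # of the line.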
+ reader = ShortReader(self.v2_ok[:-1] + b'\0') + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException, + client._negotiate) + self.assertTrue(reader.emptied) + + def test_negotiate_bad_response(self): + client = self._get_client() + reader = ShortReader(b'garbage\n' + self.v2_ok) + client.fp.read.side_effect = reader.read + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client._negotiate) + self.assertEqual(self.v2_ok, client.fp.read()) + + def test_serial_open_transport(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_failure(self): + client = self._get_serial_client() + reader = ShortReader(b'garbage' + b'\0' + self.failure + + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_flush_many_timeouts(self): + client = self._get_serial_client() + reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok) + client.fp.read.side_effect = reader.read + client.open_transport() + self.assertTrue(reader.emptied) + + def test_list_metadata_returns_list(self): + parts = ['foo', 'bar'] + value = b64e('\n'.join(parts)) + self.response_parts['payload'] = value + self.response_parts['crc'] = '40873553' + self.response_parts['length'] = SUCCESS_LEN + len(value) + client = self._get_client() + self.assertEqual(client.list(), parts) + + def test_list_metadata_returns_empty_list_if_no_customer_metadata(self): + del self.response_parts['payload'] + self.response_parts['length'] = SUCCESS_LEN - 1 + self.response_parts['crc'] = '14e563ba' + client = self._get_client() + self.assertEqual(client.list(), []) + + +class TestNetworkConversion(CiTestCase): + def test_convert_simple(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.102/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '192.168.128.93/22'}], + 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]} + found = convert_net(SDC_NICS) + self.assertEqual(expected, found) + + def test_convert_simple_alt(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_ALT) + self.assertEqual(expected, found) + + def test_convert_simple_dhcp(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_DHCP) + self.assertEqual(expected, found) + + def test_convert_simple_multi_ip(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 
'address': '8.12.42.51/24'}, + {'type': 'static', + 'address': '8.12.42.52/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}, + {'type': 'static', + 'address': '10.210.1.151/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_MIP) + self.assertEqual(expected, found) + + def test_convert_with_dns(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'dhcp4'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}, + {'type': 'nameserver', + 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]} + found = convert_net( + network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'], + dns_domain="local") + self.assertEqual(expected, found) + + def test_convert_simple_multi_ipv6(self): + expected = { + 'version': 1, + 'config': [ + {'name': 'net0', 'type': 'physical', + 'subnets': [{'type': 'static', 'address': + '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'}, + {'type': 'static', 'gateway': '8.12.42.1', + 'address': '8.12.42.51/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, + {'name': 'net1', 'type': 'physical', + 'subnets': [{'type': 'static', + 'address': '10.210.1.217/24'}], + 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} + found = convert_net(SDC_NICS_MIP_IPV6) + self.assertEqual(expected, found) + + def test_convert_simple_both_ipv4_ipv6(self): + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1', + 'type': 'static'}, + {'address': '8.12.42.51/24', + 'gateway': '8.12.42.1', + 'type': 'static'}, + {'address': '2001::11/64', 'type': 'static'}, + {'address': '8.12.42.52/32', 'type': 'static'}]}, + {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.217/24', + 'type': 'static'}]}]} + found = convert_net(SDC_NICS_IPV4_IPV6) + self.assertEqual(expected, found) + + def test_gateways_not_on_all_nics(self): + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '8.12.42.26/24', + 'gateway': '8.12.42.1', 'type': 'static'}]}, + {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.27/24', + 'type': 'static'}]}]} + found = convert_net(SDC_NICS_SINGLE_GATEWAY) + self.assertEqual(expected, found) + + def test_routes_on_all_nics(self): + routes = [ + {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'}, + {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}] + expected = { + 'version': 1, + 'config': [ + {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, + 'name': 'net0', 'type': 'physical', + 'subnets': [{'address': '8.12.42.26/24', + 'gateway': '8.12.42.1', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 'gateway': '8.12.42.3'}, + {'network': '4.0.0.0/8', + 'gateway': '10.210.1.4'}]}]}, + {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, + 'name': 'net1', 'type': 'physical', + 'subnets': [{'address': '10.210.1.27/24', 'type': 'static', + 'routes': [{'network': '3.0.0.0/8', + 
'gateway': '8.12.42.3'},
+                                        {'network': '4.0.0.0/8',
+                                         'gateway': '10.210.1.4'}]}]}]}
+        found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
+        self.maxDiff = None
+        self.assertEqual(expected, found)
+
+
+@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
+                     "Only supported on KVM and bhyve guests under SmartOS")
+@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
+                     "Requires write access to " + SERIAL_DEVICE)
+@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
+class TestSerialConcurrency(CiTestCase):
+    """
+    This class tests locking on an actual serial port, and as such can only
+    be run in a kvm or bhyve guest running on a SmartOS host. A test run on
+    a metadata socket will not be valid because a metadata socket ensures
+    there is only one session over a connection. In contrast, in the
+    absence of proper locking multiple processes opening the same serial
+    port can corrupt each other's exchanges with the metadata server.
+
+    This takes on the order of 2 to 3 minutes to run.
+    """
+    allowed_subp = ['mdata-get']
+
+    def setUp(self):
+        self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop)
+        self.mdata_proc.start()
+        super(TestSerialConcurrency, self).setUp()
+
+    def tearDown(self):
+        # os.kill() rather than mdata_proc.terminate() to avoid console spam.
+        os.kill(self.mdata_proc.pid, signal.SIGKILL)
+        self.mdata_proc.join()
+        super(TestSerialConcurrency, self).tearDown()
+
+    def start_mdata_loop(self):
+        """
+        The mdata-get command is repeatedly run in a separate process so
+        that it may try to race with metadata operations performed in the
+        main test process. Use of mdata-get is better than two processes
+        using the protocol implementation in DataSourceSmartOS because we
+        are testing to be sure that cloud-init and mdata-get respect each
+        other's locks.
+        """
+        rcs = list(range(0, 256))
+        while True:
+            subp(['mdata-get', 'sdc:routes'], rcs=rcs)
+
+    def test_all_keys(self):
+        self.assertIsNotNone(self.mdata_proc.pid)
+        ds = DataSourceSmartOS
+        keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()]
+        keys.extend(ds.SMARTOS_ATTRIB_JSON.values())
+
+        client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM)
+        self.assertIsNotNone(client)
+
+        # The behavior we are testing for was observed with mdata-get running
+        # 10 times at roughly the same time as cloud-init fetched each key
+        # once. cloud-init would regularly see failures before making it
+        # through all keys once.
+        for _ in range(0, 3):
+            for key in keys:
+                # We don't care about the return value, just that it doesn't
+                # throw any exceptions.
+                client.get(key)
+
+        self.assertIsNone(self.mdata_proc.exitcode)
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/test_upcloud.py b/tests/unittests/sources/test_upcloud.py
new file mode 100644
index 00000000..1d792066
--- /dev/null
+++ b/tests/unittests/sources/test_upcloud.py
@@ -0,0 +1,314 @@
+# Author: Antti Myyrä
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import helpers
+from cloudinit import settings
+from cloudinit import sources
+from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \
+    DataSourceUpCloudLocal
+
+from tests.unittests.helpers import mock, CiTestCase
+
+UC_METADATA = json.loads("""
+{
+    "cloud_name": "upcloud",
+    "instance_id": "00322b68-0096-4042-9406-faad61922128",
+    "hostname": "test.example.com",
+    "platform": "servers",
+    "subplatform": "metadata (http://169.254.169.254)",
+    "public_keys": [
+        "ssh-rsa AAAAB.... 
test1@example.com", + "ssh-rsa AAAAB.... test2@example.com" + ], + "region": "fi-hel2", + "network": { + "interfaces": [ + { + "index": 1, + "ip_addresses": [ + { + "address": "94.237.105.53", + "dhcp": true, + "dns": [ + "94.237.127.9", + "94.237.40.9" + ], + "family": "IPv4", + "floating": false, + "gateway": "94.237.104.1", + "network": "94.237.104.0/22" + }, + { + "address": "94.237.105.50", + "dhcp": false, + "dns": null, + "family": "IPv4", + "floating": true, + "gateway": "", + "network": "94.237.105.50/32" + } + ], + "mac": "3a:d6:ba:4a:36:e7", + "network_id": "031457f4-0f8c-483c-96f2-eccede02909c", + "type": "public" + }, + { + "index": 2, + "ip_addresses": [ + { + "address": "10.6.3.27", + "dhcp": true, + "dns": null, + "family": "IPv4", + "floating": false, + "gateway": "10.6.0.1", + "network": "10.6.0.0/22" + } + ], + "mac": "3a:d6:ba:4a:84:cc", + "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1", + "type": "utility" + }, + { + "index": 3, + "ip_addresses": [ + { + "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7", + "dhcp": true, + "dns": [ + "2a04:3540:53::1", + "2a04:3544:53::1" + ], + "family": "IPv6", + "floating": false, + "gateway": "2a04:3545:1000:720::1", + "network": "2a04:3545:1000:720::/64" + } + ], + "mac": "3a:d6:ba:4a:63:e7", + "network_id": "03000000-0000-4000-8046-000000000000", + "type": "public" + }, + { + "index": 4, + "ip_addresses": [ + { + "address": "172.30.1.10", + "dhcp": true, + "dns": null, + "family": "IPv4", + "floating": false, + "gateway": "172.30.1.1", + "network": "172.30.1.0/24" + } + ], + "mac": "3a:d6:ba:4a:8a:e1", + "network_id": "035a0a4a-7704-4de5-820d-189fc8132714", + "type": "private" + } + ], + "dns": [ + "94.237.127.9", + "94.237.40.9" + ] + }, + "storage": { + "disks": [ + { + "id": "014efb65-223b-4d44-8f0a-c29535b88dcf", + "serial": "014efb65223b4d448f0a", + "size": 10240, + "type": "disk", + "tier": "maxiops" + } + ] + }, + "tags": [], + "user_data": "", + "vendor_data": "" +} +""") + +UC_METADATA["user_data"] = b"""#cloud-config +runcmd: +- [touch, /root/cloud-init-worked ] +""" + +MD_URL = 'http://169.254.169.254/metadata/v1.json' + + +def _mock_dmi(): + return True, "00322b68-0096-4042-9406-faad61922128" + + +class TestUpCloudMetadata(CiTestCase): + """ + Test reading the meta-data + """ + def setUp(self): + super(TestUpCloudMetadata, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceUpCloud( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + if get_sysinfo: + ds._get_sysinfo = get_sysinfo + return ds + + @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo') + def test_returns_false_not_on_upcloud(self, m_read_sysinfo): + m_read_sysinfo.return_value = (False, None) + ds = self.get_ds(get_sysinfo=None) + self.assertEqual(False, ds.get_data()) + self.assertTrue(m_read_sysinfo.called) + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + def test_metadata(self, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + ds = self.get_ds() + ds.perform_dhcp_setup = False + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw()) + self.assertEqual(UC_METADATA.get('vendor_data'), + ds.get_vendordata_raw()) + self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) + self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + + 
self.assertEqual(UC_METADATA.get('public_keys'), + ds.get_public_ssh_keys()) + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + +class TestUpCloudNetworkSetup(CiTestCase): + """ + Test reading the meta-data on networked context + """ + + def setUp(self): + super(TestUpCloudNetworkSetup, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceUpCloudLocal( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + if get_sysinfo: + ds._get_sysinfo = get_sysinfo + return ds + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + def test_network_configured_metadata(self, m_net, m_dhcp, + m_fallback_nic, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + m_fallback_nic.return_value = 'eth1' + m_dhcp.return_value = [{ + 'interface': 'eth1', 'fixed-address': '10.6.3.27', + 'routers': '10.6.0.1', 'subnet-mask': '22', + 'broadcast-address': '10.6.3.255'} + ] + + ds = self.get_ds() + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(m_dhcp.called) + m_dhcp.assert_called_with('eth1', None) + + m_net.assert_called_once_with( + broadcast='10.6.3.255', interface='eth1', + ip='10.6.3.27', prefix_or_mask='22', + router='10.6.0.1', static_routes=None + ) + + self.assertTrue(mock_readmd.called) + + self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) + self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) + self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) + + @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_network_configuration(self, m_get_by_mac, mock_readmd): + mock_readmd.return_value = UC_METADATA.copy() + + raw_ifaces = UC_METADATA.get('network').get('interfaces') + self.assertEqual(4, len(raw_ifaces)) + + m_get_by_mac.return_value = { + raw_ifaces[0].get('mac'): 'eth0', + raw_ifaces[1].get('mac'): 'eth1', + raw_ifaces[2].get('mac'): 'eth2', + raw_ifaces[3].get('mac'): 'eth3', + } + + ds = self.get_ds() + ds.perform_dhcp_setup = False + + ret = ds.get_data() + self.assertTrue(ret) + + self.assertTrue(mock_readmd.called) + + netcfg = ds.network_config + + self.assertEqual(1, netcfg.get('version')) + + config = netcfg.get('config') + self.assertIsInstance(config, list) + self.assertEqual(5, len(config)) + self.assertEqual('physical', config[3].get('type')) + + self.assertEqual(raw_ifaces[2].get('mac'), config[2] + .get('mac_address')) + self.assertEqual(1, len(config[2].get('subnets'))) + self.assertEqual('ipv6_dhcpv6-stateless', config[2].get('subnets')[0] + .get('type')) + + self.assertEqual(2, len(config[0].get('subnets'))) + self.assertEqual('static', config[0].get('subnets')[1].get('type')) + + dns = config[4] + self.assertEqual('nameserver', dns.get('type')) + self.assertEqual(2, len(dns.get('address'))) + self.assertEqual( + UC_METADATA.get('network').get('dns')[1], + dns.get('address')[1] + ) + + +class TestUpCloudDatasourceLoading(CiTestCase): + def test_get_datasource_list_returns_in_local(self): + deps = (sources.DEP_FILESYSTEM, ) + ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceUpCloudLocal]) + + def test_get_datasource_list_returns_in_normal(self): + deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK) + ds_list = 
sources.DataSourceUpCloud.get_datasource_list(deps) + self.assertEqual(ds_list, + [DataSourceUpCloud]) + + def test_list_sources_finds_ds(self): + found = sources.list_sources( + ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK), + ['cloudinit.sources']) + self.assertEqual([DataSourceUpCloud], + found) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_vmware.py b/tests/unittests/sources/test_vmware.py new file mode 100644 index 00000000..d34d7782 --- /dev/null +++ b/tests/unittests/sources/test_vmware.py @@ -0,0 +1,391 @@ +# Copyright (c) 2021 VMware, Inc. All Rights Reserved. +# +# Authors: Andrew Kutz +# +# This file is part of cloud-init. See LICENSE file for license information. + +import base64 +import gzip +import os + +import pytest + +from cloudinit import dmi, helpers, safeyaml +from cloudinit import settings +from cloudinit.sources import DataSourceVMware +from tests.unittests.helpers import ( + mock, + CiTestCase, + FilesystemMockingTestCase, + populate_dir, +) + + +PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" +PRODUCT_NAME = "VMware7,1" +PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" +REROOT_FILES = { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, +} + +VMW_MULTIPLE_KEYS = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com", + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com", +] +VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com" + +VMW_METADATA_YAML = """instance-id: cloud-vm +local-hostname: cloud-vm +network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes +""" + +VMW_USERDATA_YAML = """## template: jinja +#cloud-config +users: +- default +""" + +VMW_VENDORDATA_YAML = """## template: jinja +#cloud-config +runcmd: +- echo "Hello, world." +""" + + +@pytest.yield_fixture(autouse=True) +def common_patches(): + with mock.patch('cloudinit.util.platform.platform', return_value='Linux'): + with mock.patch.multiple( + 'cloudinit.dmi', + is_container=mock.Mock(return_value=False), + is_FreeBSD=mock.Mock(return_value=False) + ): + yield + + +class TestDataSourceVMware(CiTestCase): + """ + Test common functionality that is not transport specific. + """ + + def setUp(self): + super(TestDataSourceVMware, self).setUp() + self.tmp = self.tmp_dir() + + def test_no_data_access_method(self): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertFalse(ret) + + def test_get_host_info(self): + host_info = DataSourceVMware.get_host_info() + self.assertTrue(host_info) + self.assertTrue(host_info["hostname"]) + self.assertTrue(host_info["local-hostname"]) + self.assertTrue(host_info["local_hostname"]) + self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4]) + + +class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase): + """ + Test the envvar transport. 
+ """ + + def setUp(self): + super(TestDataSourceVMwareEnvVars, self).setUp() + self.tmp = self.tmp_dir() + os.environ[DataSourceVMware.VMX_GUESTINFO] = "1" + self.create_system_files() + + def tearDown(self): + del os.environ[DataSourceVMware.VMX_GUESTINFO] + return super(TestDataSourceVMwareEnvVars, self).tearDown() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR, + DataSourceVMware.get_guestinfo_envvar_key_name("metadata"), + ), + ) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_only(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_base64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_single_ssh_key(self, m_fn): + metadata = 
DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + def test_ds_valid_on_vmware_platform(self): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, PRODUCT_NAME) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + DataSourceVMware.get_guestinfo_key_name("metadata"), + ), + ) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_single_ssh_key(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_base64(self, m_fn): + data = 
base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a non-VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_ds_invalid_on_non_vmware_platform(self, m_fn): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, None) + + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertFalse(ret) + + +def assert_metadata(test_obj, ds, metadata): + test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) + test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname()) + + expected_public_keys = metadata.get("public_keys") + if not isinstance(expected_public_keys, list): + expected_public_keys = [expected_public_keys] + + test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys()) + test_obj.assertIsInstance(ds.get_public_ssh_keys(), list) + + +def get_ds(temp_dir): + ds = DataSourceVMware.DataSourceVMware( + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir}) + ) + ds.vmware_rpctool = "vmware-rpctool" + return ds + + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/test_vultr.py b/tests/unittests/sources/test_vultr.py new file mode 100644 index 00000000..40594b95 --- /dev/null +++ b/tests/unittests/sources/test_vultr.py @@ -0,0 +1,337 @@ +# Author: Eric Benner +# +# This file is part of cloud-init. See LICENSE file for license information. 
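+
+# A minimal sketch, assuming only the v1 metadata layout exercised below:
+# Vultr serves 'vendor-data' as a list of JSON documents, which is why
+# setUp() re-encodes the sample dicts with json.dumps before handing the
+# fixtures to the datasource. Illustrative round-trip (names here are
+# not part of the datasource):
+#
+#     import json
+#     wire = [json.dumps({'ssh_pwauth': 1})]  # wire format of one entry
+#     assert json.loads(wire[0])['ssh_pwauth'] == 1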
+ +# Vultr Metadata API: +# https://www.vultr.com/metadata/ + +import json + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceVultr +from cloudinit.sources.helpers import vultr + +from tests.unittests.helpers import mock, CiTestCase + +# Vultr metadata test data +VULTR_V1_1 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_1', + 'instanceid': '42506325', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address': '108.61.89.242', + 'gateway': '108.61.89.1', + 'netmask': '255.255.255.0' + }, + 'ipv6': { + 'additional': [ + ], + 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465', + 'network': '2001:19f0:5:56c2::', + 'prefix': '64' + }, + 'mac': '56:00:03:15:c4:65', + 'network-type': 'public' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'raid1-script': '', + 'user-data': [ + ], + 'vendor-data': [ + { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + ] +} + +VULTR_V1_2 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_2', + 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f', + 'instanceid': '42872224', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address':'45.76.7.171', + 'gateway':'45.76.6.1', + 'netmask':'255.255.254.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'address':'2001:19f0:5:28a7:5400:03ff:fe1b:4eca', + 'network':'2001:19f0:5:28a7::', + 'prefix':'64' + }, + 'mac':'56:00:03:1b:4e:ca', + 'network-type':'public' + }, + { + 'ipv4': { + 'additional': [ + ], + 'address':'10.1.112.3', + 'gateway':'', + 'netmask':'255.255.240.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'network':'', + 'prefix':'' + }, + 'mac':'5a:00:03:1b:4e:ca', + 'network-type':'private', + 'network-v2-id':'fbbe2b5b-b986-4396-87f5-7246660ccb64', + 'networkid':'net5e7155329d730' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'user-data': [ + ], + + 'vendor-data': [ + { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + ] +} + +SSH_KEYS_1 = [ + "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key" +] + +# Expected generated objects + +# Expected config +EXPECTED_VULTR_CONFIG = { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } +} + +# Expected network config object from generator +EXPECTED_VULTR_NETWORK_1 = { + 'version': 1, + 'config': [ + { + 'type': 'nameserver', + 'address': ['108.61.10.10'] + }, + { + 'name': 
'eth0',
+            'type': 'physical',
+            'mac_address': '56:00:03:15:c4:65',
+            'accept-ra': 1,
+            'subnets': [
+                {'type': 'dhcp', 'control': 'auto'},
+                {'type': 'ipv6_slaac', 'control': 'auto'}
+            ],
+        }
+    ]
+}
+
+EXPECTED_VULTR_NETWORK_2 = {
+    'version': 1,
+    'config': [
+        {
+            'type': 'nameserver',
+            'address': ['108.61.10.10']
+        },
+        {
+            'name': 'eth0',
+            'type': 'physical',
+            'mac_address': '56:00:03:1b:4e:ca',
+            'accept-ra': 1,
+            'subnets': [
+                {'type': 'dhcp', 'control': 'auto'},
+                {'type': 'ipv6_slaac', 'control': 'auto'}
+            ],
+        },
+        {
+            'name': 'eth1',
+            'type': 'physical',
+            'mac_address': '5a:00:03:1b:4e:ca',
+            'subnets': [
+                {
+                    "type": "static",
+                    "control": "auto",
+                    "address": "10.1.112.3",
+                    "netmask": "255.255.240.0"
+                }
+            ],
+        }
+    ]
+}
+
+
+INTERFACE_MAP = {
+    '56:00:03:15:c4:65': 'eth0',
+    '56:00:03:1b:4e:ca': 'eth0',
+    '5a:00:03:1b:4e:ca': 'eth1'
+}
+
+
+class TestDataSourceVultr(CiTestCase):
+    def setUp(self):
+        super(TestDataSourceVultr, self).setUp()
+
+        # Stored as a dict to make it easier to maintain
+        raw1 = json.dumps(VULTR_V1_1['vendor-data'][0])
+        raw2 = json.dumps(VULTR_V1_2['vendor-data'][0])
+
+        # Make expected format
+        VULTR_V1_1['vendor-data'] = [raw1]
+        VULTR_V1_2['vendor-data'] = [raw2]
+
+        self.tmp = self.tmp_dir()
+
+    # Test the datasource itself
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    @mock.patch('cloudinit.sources.helpers.vultr.is_vultr')
+    @mock.patch('cloudinit.sources.helpers.vultr.get_metadata')
+    def test_datasource(self,
+                        mock_getmeta,
+                        mock_isvultr,
+                        mock_netmap):
+        mock_getmeta.return_value = VULTR_V1_2
+        mock_isvultr.return_value = True
+        mock_netmap.return_value = INTERFACE_MAP
+
+        source = DataSourceVultr.DataSourceVultr(
+            settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}))
+
+        # Test that data retrieval succeeds
+        self.assertEqual(True, source._get_data())
+
+        # Test instance id
+        self.assertEqual("42872224", source.metadata['instanceid'])
+
+        # Test hostname
+        self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname'])
+
+        # Test ssh keys
+        self.assertEqual(SSH_KEYS_1, source.metadata['public-keys'])
+
+        # Test vendor data generation
+        orig_val = self.maxDiff
+        self.maxDiff = None
+
+        vendordata = source.vendordata_raw
+
+        # Test vendor config
+        self.assertEqual(
+            EXPECTED_VULTR_CONFIG,
+            json.loads(vendordata[0].replace("#cloud-config", "")))
+
+        self.maxDiff = orig_val
+
+        # Test network config generated by the datasource
+        self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config)
+
+    # Test network config generation
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    def test_network_config(self, mock_netmap):
+        mock_netmap.return_value = INTERFACE_MAP
+        interf = VULTR_V1_1['interfaces']
+
+        self.assertEqual(EXPECTED_VULTR_NETWORK_1,
+                         vultr.generate_network_config(interf))
+
+    # Test Private Networking config generation
+    @mock.patch('cloudinit.net.get_interfaces_by_mac')
+    def test_private_network_config(self, mock_netmap):
+        mock_netmap.return_value = INTERFACE_MAP
+        interf = VULTR_V1_2['interfaces']
+
+        self.assertEqual(EXPECTED_VULTR_NETWORK_2,
+                         vultr.generate_network_config(interf))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/__init__.py b/tests/unittests/sources/vmware/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/unittests/sources/vmware/test_custom_script.py b/tests/unittests/sources/vmware/test_custom_script.py
new file mode 100644
index 00000000..fcbb9cd5
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_custom_script.py
@@ -0,0 +1,109 @@
+# Copyright (C) 
2015 Canonical Ltd.
+# Copyright (C) 2017-2019 VMware INC.
+#
+# Author: Maitreyee Saikia
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import os
+import stat
+from cloudinit import util
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+    CustomScriptConstant,
+    CustomScriptNotFound,
+    PreCustomScript,
+    PostCustomScript,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class TestVmwareCustomScript(CiTestCase):
+    def setUp(self):
+        self.tmpDir = self.tmp_dir()
+        # Mock the tmpDir as the root dir in VM.
+        self.execDir = os.path.join(self.tmpDir, ".customization")
+        self.execScript = os.path.join(self.execDir,
+                                       ".customize.sh")
+
+    def test_prepare_custom_script(self):
+        """
+        This test is designed to verify the behavior based on the presence
+        of a custom script. It is mainly needed for the scenario where a
+        custom script is expected but was not properly copied; a
+        "CustomScriptNotFound" exception is raised in such cases.
+        """
+        # Custom script does not exist.
+        preCust = PreCustomScript("random-vmw-test", self.tmpDir)
+        self.assertEqual("random-vmw-test", preCust.scriptname)
+        self.assertEqual(self.tmpDir, preCust.directory)
+        self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir),
+                         preCust.scriptpath)
+        with self.assertRaises(CustomScriptNotFound):
+            preCust.prepare_script()
+
+        # Custom script exists.
+        custScript = self.tmp_path("test-cust", self.tmpDir)
+        util.write_file(custScript, "test-CR-strip\r\r")
+        with mock.patch.object(CustomScriptConstant,
+                               "CUSTOM_TMP_DIR",
+                               self.execDir):
+            with mock.patch.object(CustomScriptConstant,
+                                   "CUSTOM_SCRIPT",
+                                   self.execScript):
+                postCust = PostCustomScript("test-cust",
+                                            self.tmpDir,
+                                            self.tmpDir)
+                self.assertEqual("test-cust", postCust.scriptname)
+                self.assertEqual(self.tmpDir, postCust.directory)
+                self.assertEqual(custScript, postCust.scriptpath)
+                postCust.prepare_script()
+
+                # Custom script is copied with exec privilege
+                self.assertTrue(os.path.exists(self.execScript))
+                st = os.stat(self.execScript)
+                self.assertTrue(st.st_mode & stat.S_IEXEC)
+                with open(self.execScript, "r") as f:
+                    content = f.read()
+                self.assertEqual(content, "test-CR-strip")
+                # Check that all carriage returns are stripped from
+                # the script.
+                self.assertFalse("\r" in content)
+
+    def test_execute_post_cust(self):
+        """
+        This test is designed to verify the behavior after executing the
+        post-customization script.
+        """
+        # Prepare the customize package
+        postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir)
+        util.write_file(postCustRun, "This is the script to run post cust")
+        userScript = self.tmp_path("test-cust", self.tmpDir)
+        util.write_file(userScript, "This is the post cust script")
+
+        # Mock the cc_scripts_per_instance dir and marker file.
+        # Create another tmp dir for cc_scripts_per_instance.
+        ccScriptDir = self.tmp_dir()
+        ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh")
+        markerFile = os.path.join(self.tmpDir, ".markerFile")
+        with mock.patch.object(CustomScriptConstant,
+                               "CUSTOM_TMP_DIR",
+                               self.execDir):
+            with mock.patch.object(CustomScriptConstant,
+                                   "CUSTOM_SCRIPT",
+                                   self.execScript):
+                with mock.patch.object(CustomScriptConstant,
+                                       "POST_CUSTOM_PENDING_MARKER",
+                                       markerFile):
+                    postCust = PostCustomScript("test-cust",
+                                                self.tmpDir,
+                                                ccScriptDir)
+                    postCust.execute()
+                    # Check that cc_scripts_per_instance and the marker
+                    # file are created.
+                    self.assertTrue(os.path.exists(ccScript))
+                    with open(ccScript, "r") as f:
+                        content = f.read()
+                    self.assertEqual(content,
+                                     "This is the script to run post cust")
+                    self.assertTrue(os.path.exists(markerFile))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/sources/vmware/test_guestcust_util.py b/tests/unittests/sources/vmware/test_guestcust_util.py
new file mode 100644
index 00000000..9114f0b9
--- /dev/null
+++ b/tests/unittests/sources/vmware/test_guestcust_util.py
@@ -0,0 +1,98 @@
+# Copyright (C) 2019 Canonical Ltd.
+# Copyright (C) 2019 VMware INC.
+#
+# Author: Xiaofeng Wang
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit import subp
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
+    get_tools_config,
+    set_gc_status,
+)
+from tests.unittests.helpers import CiTestCase, mock
+
+
+class TestGuestCustUtil(CiTestCase):
+    def test_get_tools_config_not_installed(self):
+        """
+        This test is designed to verify the behavior when
+        vmware-toolbox-cmd is not installed.
+        """
+        with mock.patch.object(subp, 'which', return_value=None):
+            self.assertEqual(
+                get_tools_config('section', 'key', 'defaultVal'), 'defaultVal')
+
+    def test_get_tools_config_internal_exception(self):
+        """
+        This test is designed to verify the behavior when an internal
+        exception is raised.
+        """
+        with mock.patch.object(subp, 'which', return_value='/dummy/path'):
+            with mock.patch.object(subp, 'subp',
+                                   return_value=('key=value', b''),
+                                   side_effect=subp.ProcessExecutionError(
+                                       "subp failed", exit_code=99)):
+                # verify return value is 'defaultVal', not 'value'.
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'defaultVal')
+
+    def test_get_tools_config_normal(self):
+        """
+        This test is designed to verify that a value can be parsed from
+        a 'key = value' entry in the given [section].
+        """
+        with mock.patch.object(subp, 'which', return_value='/dummy/path'):
+            # value is not blank
+            with mock.patch.object(subp, 'subp',
+                                   return_value=('key = value ', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'value')
+            # value is blank
+            with mock.patch.object(subp, 'subp',
+                                   return_value=('key = ', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    '')
+            # value contains =
+            with mock.patch.object(subp, 'subp',
                                   return_value=('key=Bar=Wark', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'Bar=Wark')
+
+            # value contains specific characters
+            with mock.patch.object(subp, 'subp',
+                                   return_value=('[a] b.c_d=e-f', b'')):
+                self.assertEqual(
+                    get_tools_config('section', 'key', 'defaultVal'),
+                    'e-f')
+
+    def test_set_gc_status(self):
+        """
+        This test is designed to verify the behavior of set_gc_status.
+        """
+        # config is None, return None
+        self.assertEqual(set_gc_status(None, 'Successful'), None)
+
+        # post gc status is NO, return None
+        cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg")
+        conf = Config(cf)
+        self.assertEqual(set_gc_status(conf, 'Successful'), None)
+
+        # post gc status is YES, subp is called to execute command
+        cf._insertKey("MISC|POST-GC-STATUS", "YES")
+        conf = Config(cf)
+        with mock.patch.object(subp, 'subp',
+                               return_value=('ok', b'')) as mockobj:
+            self.assertEqual(
+                set_gc_status(conf, 'Successful'), ('ok', b''))
+            mockobj.assert_called_once_with(
+                ['vmware-rpctool', 'info-set 
guestinfo.gc.status Successful'], + rcs=[0]) + +# vi: ts=4 expandtab diff --git a/tests/unittests/sources/vmware/test_vmware_config_file.py b/tests/unittests/sources/vmware/test_vmware_config_file.py new file mode 100644 index 00000000..54de113e --- /dev/null +++ b/tests/unittests/sources/vmware/test_vmware_config_file.py @@ -0,0 +1,545 @@ +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2016 VMware INC. +# +# Author: Sankar Tanguturi +# Pengpeng Sun +# +# This file is part of cloud-init. See LICENSE file for license information. + +import logging +import os +import sys +import tempfile +import textwrap + +from cloudinit.sources.DataSourceOVF import get_network_config_from_conf +from cloudinit.sources.DataSourceOVF import read_vmware_imc +from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum +from cloudinit.sources.helpers.vmware.imc.config import Config +from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile +from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet +from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator +from tests.unittests.helpers import CiTestCase + +logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +class TestVmwareConfigFile(CiTestCase): + + def test_utility_methods(self): + """Tests basic utility methods of ConfigFile class""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + cf.clear() + + self.assertEqual(0, len(cf), "clear size") + + cf._insertKey(" PASSWORD|-PASS ", " foo ") + cf._insertKey("BAR", " ") + + self.assertEqual(2, len(cf), "insert size") + self.assertEqual('foo', cf["PASSWORD|-PASS"], "password") + self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword") + self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"), + "keepPassword") + self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"), + "removePassword") + self.assertFalse("FOO" in cf, "hasFoo") + self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo") + self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo") + self.assertTrue("BAR" in cf, "hasBar") + self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar") + self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar") + + def test_datasource_instance_id(self): + """Tests instance id for the DatasourceOVF""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + instance_id_prefix = 'iid-vmware-' + + conf = Config(cf) + + (md1, _, _) = read_vmware_imc(conf) + self.assertIn(instance_id_prefix, md1["instance-id"]) + self.assertEqual(md1["instance-id"], 'iid-vmware-imc') + + (md2, _, _) = read_vmware_imc(conf) + self.assertIn(instance_id_prefix, md2["instance-id"]) + self.assertEqual(md2["instance-id"], 'iid-vmware-imc') + + self.assertEqual(md2["instance-id"], md1["instance-id"]) + + def test_configfile_static_2nics(self): + """Tests Config class for a configuration with two static NICs.""" + cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") + + conf = Config(cf) + + self.assertEqual('myhost1', conf.host_name, "hostName") + self.assertEqual('Africa/Abidjan', conf.timezone, "tz") + self.assertTrue(conf.utc, "utc") + + self.assertEqual(['10.20.145.1', '10.20.145.2'], + conf.name_servers, + "dns") + self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], + conf.dns_suffixes, + "suffixes") + + nics = conf.nics + ipv40 = nics[0].staticIpv4 + + self.assertEqual(2, len(nics), "nics") + self.assertEqual('NIC1', nics[0].name, "nic0") + 
self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") + self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0") + self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0") + self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0") + self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0") + self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0") + self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1") + + self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0") + self.assertEqual('fc00:10:20:87::154', + nics[0].staticIpv6[0].ip, + "ipv6Addr0") + + self.assertEqual('NIC2', nics[1].name, "nic1") + self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp") + + def test_config_file_dhcp_2nics(self): + """Tests Config class for a configuration with two DHCP NICs.""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + conf = Config(cf) + nics = conf.nics + self.assertEqual(2, len(nics), "nics") + self.assertEqual('NIC1', nics[0].name, "nic0") + self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") + self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0") + + def test_config_password(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + cf._insertKey("PASSWORD|-PASS", "test-password") + cf._insertKey("PASSWORD|RESET", "no") + + conf = Config(cf) + self.assertEqual('test-password', conf.admin_password, "password") + self.assertFalse(conf.reset_password, "do not reset password") + + def test_config_reset_passwd(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + cf._insertKey("PASSWORD|-PASS", "test-password") + cf._insertKey("PASSWORD|RESET", "random") + + conf = Config(cf) + with self.assertRaises(ValueError): + pw = conf.reset_password + self.assertIsNone(pw) + + cf.clear() + cf._insertKey("PASSWORD|RESET", "yes") + self.assertEqual(1, len(cf), "insert size") + + conf = Config(cf) + self.assertTrue(conf.reset_password, "reset password") + + def test_get_config_nameservers(self): + """Tests DNS and nameserver settings in a configuration.""" + cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") + + config = Config(cf) + + network_config = get_network_config_from_conf(config, False) + + self.assertEqual(1, network_config.get('version')) + + config_types = network_config.get('config') + name_servers = None + dns_suffixes = None + + for type in config_types: + if type.get('type') == 'nameserver': + name_servers = type.get('address') + dns_suffixes = type.get('search') + break + + self.assertEqual(['10.20.145.1', '10.20.145.2'], + name_servers, + "dns") + self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], + dns_suffixes, + "suffixes") + + def test_gen_subnet(self): + """Tests if gen_subnet properly calculates network subnet from + IPv4 address and netmask""" + ip_subnet_list = [['10.20.87.253', '255.255.252.0', '10.20.84.0'], + ['10.20.92.105', '255.255.252.0', '10.20.92.0'], + ['192.168.0.10', '255.255.0.0', '192.168.0.0']] + for entry in ip_subnet_list: + self.assertEqual(entry[2], gen_subnet(entry[0], entry[1]), + "Subnet for a specified ip and netmask") + + def test_get_config_dns_suffixes(self): + """Tests if get_network_config_from_conf properly + generates nameservers and dns settings from a + specified configuration""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + config = Config(cf) + + network_config = get_network_config_from_conf(config, False) + + self.assertEqual(1, network_config.get('version')) + + config_types = network_config.get('config') + 
name_servers = None + dns_suffixes = None + + for type in config_types: + if type.get('type') == 'nameserver': + name_servers = type.get('address') + dns_suffixes = type.get('search') + break + + self.assertEqual([], + name_servers, + "dns") + self.assertEqual(['eng.vmware.com'], + dns_suffixes, + "suffixes") + + def test_get_nics_list_dhcp(self): + """Tests if NicConfigurator properly calculates network subnets + for a configuration with a list of DHCP NICs""" + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + + config = Config(cf) + + nicConfigurator = NicConfigurator(config.nics, False) + nics_cfg_list = nicConfigurator.generate() + + self.assertEqual(2, len(nics_cfg_list), "number of config elements") + + nic1 = {'name': 'NIC1'} + nic2 = {'name': 'NIC2'} + for cfg in nics_cfg_list: + if cfg.get('name') == nic1.get('name'): + nic1.update(cfg) + elif cfg.get('name') == nic2.get('name'): + nic2.update(cfg) + + self.assertEqual('physical', nic1.get('type'), 'type of NIC1') + self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1') + self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'), + 'mac address of NIC1') + subnets = nic1.get('subnets') + self.assertEqual(1, len(subnets), 'number of subnets for NIC1') + subnet = subnets[0] + self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC1') + self.assertEqual('auto', subnet.get('control'), 'NIC1 Control type') + + self.assertEqual('physical', nic2.get('type'), 'type of NIC2') + self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2') + self.assertEqual('00:50:56:a6:5a:de', nic2.get('mac_address'), + 'mac address of NIC2') + subnets = nic2.get('subnets') + self.assertEqual(1, len(subnets), 'number of subnets for NIC2') + subnet = subnets[0] + self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC2') + self.assertEqual('auto', subnet.get('control'), 'NIC2 Control type') + + def test_get_nics_list_static(self): + """Tests if NicConfigurator properly calculates network subnets + for a configuration with 2 static NICs""" + cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") + + config = Config(cf) + + nicConfigurator = NicConfigurator(config.nics, False) + nics_cfg_list = nicConfigurator.generate() + + self.assertEqual(2, len(nics_cfg_list), "number of elements") + + nic1 = {'name': 'NIC1'} + nic2 = {'name': 'NIC2'} + route_list = [] + for cfg in nics_cfg_list: + cfg_type = cfg.get('type') + if cfg_type == 'physical': + if cfg.get('name') == nic1.get('name'): + nic1.update(cfg) + elif cfg.get('name') == nic2.get('name'): + nic2.update(cfg) + + self.assertEqual('physical', nic1.get('type'), 'type of NIC1') + self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1') + self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'), + 'mac address of NIC1') + + subnets = nic1.get('subnets') + self.assertEqual(2, len(subnets), 'Number of subnets') + + static_subnet = [] + static6_subnet = [] + + for subnet in subnets: + subnet_type = subnet.get('type') + if subnet_type == 'static': + static_subnet.append(subnet) + elif subnet_type == 'static6': + static6_subnet.append(subnet) + else: + self.assertEqual(True, False, 'Unknown type') + if 'route' in subnet: + for route in subnet.get('routes'): + route_list.append(route) + + self.assertEqual(1, len(static_subnet), 'Number of static subnet') + self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet') + + subnet = static_subnet[0] + self.assertEqual('10.20.87.154', subnet.get('address'), + 'IPv4 address of static subnet') + 
self.assertEqual('255.255.252.0', subnet.get('netmask'), + 'NetMask of static subnet') + self.assertEqual('auto', subnet.get('control'), + 'control for static subnet') + + subnet = static6_subnet[0] + self.assertEqual('fc00:10:20:87::154', subnet.get('address'), + 'IPv6 address of static subnet') + self.assertEqual('64', subnet.get('netmask'), + 'NetMask of static6 subnet') + + route_set = set(['10.20.87.253', '10.20.87.105', '192.168.0.10']) + for route in route_list: + self.assertEqual(10000, route.get('metric'), 'metric of route') + gateway = route.get('gateway') + if gateway in route_set: + route_set.discard(gateway) + else: + self.assertEqual(True, False, 'invalid gateway %s' % (gateway)) + + self.assertEqual('physical', nic2.get('type'), 'type of NIC2') + self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2') + self.assertEqual('00:50:56:a6:ef:7d', nic2.get('mac_address'), + 'mac address of NIC2') + + subnets = nic2.get('subnets') + self.assertEqual(1, len(subnets), 'Number of subnets for NIC2') + + subnet = subnets[0] + self.assertEqual('static', subnet.get('type'), 'Subnet type') + self.assertEqual('192.168.6.102', subnet.get('address'), + 'Subnet address') + self.assertEqual('255.255.0.0', subnet.get('netmask'), + 'Subnet netmask') + + def test_custom_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.custom_script_name) + cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script") + conf = Config(cf) + self.assertEqual("test-script", conf.custom_script_name) + + def test_post_gc_status(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertFalse(conf.post_gc_status) + cf._insertKey("MISC|POST-GC-STATUS", "YES") + conf = Config(cf) + self.assertTrue(conf.post_gc_status) + + def test_no_default_run_post_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertFalse(conf.default_run_post_script) + cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO") + conf = Config(cf) + self.assertFalse(conf.default_run_post_script) + + def test_yes_default_run_post_script(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes") + conf = Config(cf) + self.assertTrue(conf.default_run_post_script) + + +class TestVmwareNetConfig(CiTestCase): + """Test conversion of vmware config to cloud-init config.""" + + maxDiff = None + + def _get_NicConfigurator(self, text): + fp = None + try: + with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(), + delete=False) as fp: + fp.write(text) + fp.close() + cfg = Config(ConfigFile(fp.name)) + return NicConfigurator(cfg.nics, use_system_devices=False) + finally: + if fp: + os.unlink(fp.name) + + def test_non_primary_nic_without_gateway(self): + """A non primary nic set is not required to have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}], + nc.generate()) + + def 
test_non_primary_nic_with_gateway(self): + """A non primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0', + 'routes': + [{'type': 'route', 'destination': '10.20.84.0/22', + 'gateway': '10.20.87.253', 'metric': 10000}]}]}], + nc.generate()) + + def test_cust_non_primary_nic_with_gateway_(self): + """A customer non primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = static-debug-vm + DOMAINNAME = cluster.local + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:ac:d1:8a + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 100.115.223.75 + NETMASK = 255.255.255.0 + GATEWAY = 100.115.223.254 + + + [DNS] + DNSFROMDHCP=no + + NAMESERVER|1 = 8.8.8.8 + + [DATETIME] + UTC = yes + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:ac:d1:8a', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '100.115.223.75', 'netmask': '255.255.255.0', + 'routes': + [{'type': 'route', 'destination': '100.115.223.0/24', + 'gateway': '100.115.223.254', 'metric': 10000}]}]}], + nc.generate()) + + def test_a_primary_nic_with_gateway(self): + """A primary nic set can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = myhost1 + DOMAINNAME = eng.vmware.com + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:a6:8c:08 + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 10.20.87.154 + NETMASK = 255.255.252.0 + PRIMARY = true + GATEWAY = 10.20.87.253 + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:a6:8c:08', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '10.20.87.154', 'netmask': '255.255.252.0', + 'gateway': '10.20.87.253'}]}], + nc.generate()) + + def test_meta_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.meta_data_name) + cf._insertKey("CLOUDINIT|METADATA", "test-metadata") + conf = Config(cf) + self.assertEqual("test-metadata", conf.meta_data_name) + + def test_user_data(self): + cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") + conf = Config(cf) + self.assertIsNone(conf.user_data_name) + cf._insertKey("CLOUDINIT|USERDATA", "test-userdata") + conf = Config(cf) + self.assertEqual("test-userdata", conf.user_data_name) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 739bbebf..4382a078 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -12,7 +12,7 @@ from cloudinit import settings from cloudinit import url_helper from cloudinit import util -from cloudinit.tests.helpers import TestCase, CiTestCase, ExitStack, mock +from tests.unittests.helpers 
import TestCase, CiTestCase, ExitStack, mock
 
 
 class FakeModule(handlers.Handler):
diff --git a/tests/unittests/test_atomic_helper.py b/tests/unittests/test_atomic_helper.py
index 0101b0e3..0c8b8e53 100644
--- a/tests/unittests/test_atomic_helper.py
+++ b/tests/unittests/test_atomic_helper.py
@@ -6,7 +6,7 @@ import stat
 
 from cloudinit import atomic_helper
 
-from cloudinit.tests.helpers import CiTestCase
+from tests.unittests.helpers import CiTestCase
 
 
 class TestAtomicHelper(CiTestCase):
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 230866b9..cf2c0a4d 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -11,7 +11,7 @@ import tempfile
 
 from textwrap import dedent
 
-from cloudinit.tests.helpers import (
+from tests.unittests.helpers import (
     FilesystemMockingTestCase, CiTestCase, mock, skipUnlessJinja)
 
 from cloudinit import handlers
diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py
index 1459fd9c..fd717f34 100644
--- a/tests/unittests/test_cli.py
+++ b/tests/unittests/test_cli.py
@@ -5,7 +5,7 @@ import io
 from collections import namedtuple
 
 from cloudinit.cmd import main as cli
-from cloudinit.tests import helpers as test_helpers
+from tests.unittests import helpers as test_helpers
 
 from cloudinit.util import load_file, load_json
diff --git a/tests/unittests/test_conftest.py b/tests/unittests/test_conftest.py
new file mode 100644
index 00000000..2e02b7a7
--- /dev/null
+++ b/tests/unittests/test_conftest.py
@@ -0,0 +1,65 @@
+import pytest
+
+from cloudinit import subp
+from tests.unittests.helpers import CiTestCase
+
+
+class TestDisableSubpUsage:
+    """Test that the disable_subp_usage fixture behaves as expected."""
+
+    def test_using_subp_raises_assertion_error(self):
+        with pytest.raises(AssertionError):
+            subp.subp(["some", "args"])
+
+    def test_typeerrors_on_incorrect_usage(self):
+        with pytest.raises(TypeError):
+            # We are intentionally passing no value for a parameter, so:
+            # pylint: disable=no-value-for-parameter
+            subp.subp()
+
+    @pytest.mark.allow_all_subp
+    def test_subp_usage_can_be_reenabled(self):
+        subp.subp(['whoami'])
+
+    @pytest.mark.allow_subp_for("whoami")
+    def test_subp_usage_can_be_conditionally_reenabled(self):
+        # The two parameters test each potential invocation with a single
+        # argument
+        with pytest.raises(AssertionError) as excinfo:
+            subp.subp(["some", "args"])
+        assert "allowed: whoami" in str(excinfo.value)
+        subp.subp(['whoami'])
+
+    @pytest.mark.allow_subp_for("whoami", "bash")
+    def test_subp_usage_can_be_conditionally_reenabled_for_multiple_cmds(self):
+        with pytest.raises(AssertionError) as excinfo:
+            subp.subp(["some", "args"])
+        assert "allowed: whoami,bash" in str(excinfo.value)
+        subp.subp(['bash', '-c', 'true'])
+        subp.subp(['whoami'])
+
+    @pytest.mark.allow_all_subp
+    @pytest.mark.allow_subp_for("bash")
+    def test_both_marks_raise_an_error(self):
+        with pytest.raises(AssertionError, match="marked both"):
+            subp.subp(["bash"])
+
+
+class TestDisableSubpUsageInTestSubclass(CiTestCase):
+    """Test that disable_subp_usage doesn't impact CiTestCase's subp logic."""
+
+    def test_using_subp_raises_exception(self):
+        with pytest.raises(Exception):
+            subp.subp(["some", "args"])
+
+    def test_typeerrors_on_incorrect_usage(self):
+        with pytest.raises(TypeError):
+            subp.subp()
+
+    def test_subp_usage_can_be_reenabled(self):
+        _old_allowed_subp = self.allowed_subp
+        self.allowed_subp = True
+        try:
+            subp.subp(['bash', '-c', 'true'])
+        finally:
+            
self.allowed_subp = _old_allowed_subp diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py index bfd07ecf..be9da40c 100644 --- a/tests/unittests/test_cs_util.py +++ b/tests/unittests/test_cs_util.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit.cs_utils import Cepko diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 8c968ae9..2ee09bbb 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -25,7 +25,7 @@ from cloudinit import user_data as ud from cloudinit import safeyaml from cloudinit import util -from cloudinit.tests import helpers +from tests.unittests import helpers INSTANCE_ID = "i-testing" diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/test_datasource/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py deleted file mode 100644 index cab1ac2b..00000000 --- a/tests/unittests/test_datasource/test_aliyun.py +++ /dev/null @@ -1,248 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import functools -import httpretty -import os -from unittest import mock - -from cloudinit import helpers -from cloudinit.sources import DataSourceAliYun as ay -from cloudinit.sources.DataSourceEc2 import convert_ec2_metadata_network_config -from cloudinit.tests import helpers as test_helpers - -DEFAULT_METADATA = { - 'instance-id': 'aliyun-test-vm-00', - 'eipv4': '10.0.0.1', - 'hostname': 'test-hostname', - 'image-id': 'm-test', - 'launch-index': '0', - 'mac': '00:16:3e:00:00:00', - 'network-type': 'vpc', - 'private-ipv4': '192.168.0.1', - 'serial-number': 'test-string', - 'vpc-cidr-block': '192.168.0.0/16', - 'vpc-id': 'test-vpc', - 'vswitch-id': 'test-vpc', - 'vswitch-cidr-block': '192.168.0.0/16', - 'zone-id': 'test-zone-1', - 'ntp-conf': {'ntp_servers': [ - 'ntp1.aliyun.com', - 'ntp2.aliyun.com', - 'ntp3.aliyun.com']}, - 'source-address': ['http://mirrors.aliyun.com', - 'http://mirrors.aliyuncs.com'], - 'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'}, - 'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}} -} - -DEFAULT_USERDATA = """\ -#cloud-config - -hostname: localhost""" - - -def register_mock_metaserver(base_url, data): - def register_helper(register, base_url, body): - if isinstance(body, str): - register(base_url, body) - elif isinstance(body, list): - register(base_url.rstrip('/'), '\n'.join(body) + '\n') - elif isinstance(body, dict): - if not body: - register(base_url.rstrip('/') + '/', 'not found', - status_code=404) - vals = [] - for k, v in body.items(): - if isinstance(v, (str, list)): - suffix = k.rstrip('/') - else: - suffix = k.rstrip('/') + '/' - vals.append(suffix) - url = base_url.rstrip('/') + '/' + suffix - register_helper(register, url, v) - register(base_url, '\n'.join(vals) + '\n') - - register = functools.partial(httpretty.register_uri, httpretty.GET) - register_helper(register, base_url, data) - - -class TestAliYunDatasource(test_helpers.HttprettyTestCase): - def setUp(self): - super(TestAliYunDatasource, self).setUp() - cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}} - distro = {} - paths = helpers.Paths({'run_dir': self.tmp_dir()}) - self.ds = ay.DataSourceAliYun(cfg, distro, paths) - self.metadata_address = 
self.ds.metadata_urls[0] - - @property - def default_metadata(self): - return DEFAULT_METADATA - - @property - def default_userdata(self): - return DEFAULT_USERDATA - - @property - def metadata_url(self): - return os.path.join( - self.metadata_address, - self.ds.min_metadata_version, 'meta-data') + '/' - - @property - def userdata_url(self): - return os.path.join( - self.metadata_address, - self.ds.min_metadata_version, 'user-data') - - # EC2 provides an instance-identity document which must return 404 here - # for this test to pass. - @property - def default_identity(self): - return {} - - @property - def identity_url(self): - return os.path.join(self.metadata_address, - self.ds.min_metadata_version, - 'dynamic', 'instance-identity') - - def regist_default_server(self): - register_mock_metaserver(self.metadata_url, self.default_metadata) - register_mock_metaserver(self.userdata_url, self.default_userdata) - register_mock_metaserver(self.identity_url, self.default_identity) - - def _test_get_data(self): - self.assertEqual(self.ds.metadata, self.default_metadata) - self.assertEqual(self.ds.userdata_raw, - self.default_userdata.encode('utf8')) - - def _test_get_sshkey(self): - pub_keys = [v['openssh-key'] for (_, v) in - self.default_metadata['public-keys'].items()] - self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys) - - def _test_get_iid(self): - self.assertEqual(self.default_metadata['instance-id'], - self.ds.get_instance_id()) - - def _test_host_name(self): - self.assertEqual(self.default_metadata['hostname'], - self.ds.get_hostname()) - - @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") - def test_with_mock_server(self, m_is_aliyun): - m_is_aliyun.return_value = True - self.regist_default_server() - ret = self.ds.get_data() - self.assertEqual(True, ret) - self.assertEqual(1, m_is_aliyun.call_count) - self._test_get_data() - self._test_get_sshkey() - self._test_get_iid() - self._test_host_name() - self.assertEqual('aliyun', self.ds.cloud_name) - self.assertEqual('ec2', self.ds.platform) - self.assertEqual( - 'metadata (http://100.100.100.200)', self.ds.subplatform) - - @mock.patch("cloudinit.sources.DataSourceAliYun._is_aliyun") - def test_returns_false_when_not_on_aliyun(self, m_is_aliyun): - """If is_aliyun returns false, then get_data should return False.""" - m_is_aliyun.return_value = False - self.regist_default_server() - ret = self.ds.get_data() - self.assertEqual(1, m_is_aliyun.call_count) - self.assertEqual(False, ret) - - def test_parse_public_keys(self): - public_keys = {} - self.assertEqual(ay.parse_public_keys(public_keys), []) - - public_keys = {'key-pair-0': 'ssh-key-0'} - self.assertEqual(ay.parse_public_keys(public_keys), - [public_keys['key-pair-0']]) - - public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'} - self.assertEqual(set(ay.parse_public_keys(public_keys)), - set([public_keys['key-pair-0'], - public_keys['key-pair-1']])) - - public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']} - self.assertEqual(ay.parse_public_keys(public_keys), - public_keys['key-pair-0']) - - public_keys = {'key-pair-0': {'openssh-key': []}} - self.assertEqual(ay.parse_public_keys(public_keys), []) - - public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}} - self.assertEqual(ay.parse_public_keys(public_keys), - [public_keys['key-pair-0']['openssh-key']]) - - public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0', - 'ssh-key-1']}} - self.assertEqual(ay.parse_public_keys(public_keys), - public_keys['key-pair-0']['openssh-key']) - - def 
test_route_metric_calculated_without_device_number(self): - """Test that route-metric code works without `device-number` - - `device-number` is part of EC2 metadata, but not supported on aliyun. - Attempting to access it will raise a KeyError. - - LP: #1917875 - """ - netcfg = convert_ec2_metadata_network_config( - {"interfaces": {"macs": { - "06:17:04:d7:26:09": { - "interface-id": "eni-e44ef49e", - }, - "06:17:04:d7:26:08": { - "interface-id": "eni-e44ef49f", - } - }}}, - macs_to_nics={ - '06:17:04:d7:26:09': 'eth0', - '06:17:04:d7:26:08': 'eth1', - } - ) - - met0 = netcfg['ethernets']['eth0']['dhcp4-overrides']['route-metric'] - met1 = netcfg['ethernets']['eth1']['dhcp4-overrides']['route-metric'] - - # route-metric numbers should be 100 apart - assert 100 == abs(met0 - met1) - - -class TestIsAliYun(test_helpers.CiTestCase): - ALIYUN_PRODUCT = 'Alibaba Cloud ECS' - read_dmi_data_expected = [mock.call('system-product-name')] - - @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") - def test_true_on_aliyun_product(self, m_read_dmi_data): - """Should return true if the dmi product data has expected value.""" - m_read_dmi_data.return_value = self.ALIYUN_PRODUCT - ret = ay._is_aliyun() - self.assertEqual(self.read_dmi_data_expected, - m_read_dmi_data.call_args_list) - self.assertEqual(True, ret) - - @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") - def test_false_on_empty_string(self, m_read_dmi_data): - """Should return false on empty value returned.""" - m_read_dmi_data.return_value = "" - ret = ay._is_aliyun() - self.assertEqual(self.read_dmi_data_expected, - m_read_dmi_data.call_args_list) - self.assertEqual(False, ret) - - @mock.patch("cloudinit.sources.DataSourceAliYun.dmi.read_dmi_data") - def test_false_on_unknown_string(self, m_read_dmi_data): - """Should return false on an unrelated string.""" - m_read_dmi_data.return_value = "cubs win" - ret = ay._is_aliyun() - self.assertEqual(self.read_dmi_data_expected, - m_read_dmi_data.call_args_list) - self.assertEqual(False, ret) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py deleted file mode 100644 index 7a5393ac..00000000 --- a/tests/unittests/test_datasource/test_altcloud.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joe VLcek -# -# This file is part of cloud-init. See LICENSE file for license information. - -''' -This test file exercises the code in sources DataSourceAltCloud.py -''' - -import os -import shutil -import tempfile - -from cloudinit import dmi -from cloudinit import helpers -from cloudinit import subp -from cloudinit import util - -from cloudinit.tests.helpers import CiTestCase, mock - -import cloudinit.sources.DataSourceAltCloud as dsac - -OS_UNAME_ORIG = getattr(os, 'uname') - - -def _write_user_data_files(mount_dir, value): - ''' - Populate the deltacloud_user_data_file the user_data_file - which would be populated with user data. 
- ''' - deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt' - user_data_file = mount_dir + '/user-data.txt' - - udfile = open(deltacloud_user_data_file, 'w') - udfile.write(value) - udfile.close() - os.chmod(deltacloud_user_data_file, 0o664) - - udfile = open(user_data_file, 'w') - udfile.write(value) - udfile.close() - os.chmod(user_data_file, 0o664) - - -def _remove_user_data_files(mount_dir, - dc_file=True, - non_dc_file=True): - ''' - Remove the test files: deltacloud_user_data_file and - user_data_file - ''' - deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt' - user_data_file = mount_dir + '/user-data.txt' - - # Ignore any failures removeing files that are already gone. - if dc_file: - try: - os.remove(deltacloud_user_data_file) - except OSError: - pass - - if non_dc_file: - try: - os.remove(user_data_file) - except OSError: - pass - - -def _dmi_data(expected): - ''' - Spoof the data received over DMI - ''' - def _data(key): - return expected - - return _data - - -class TestGetCloudType(CiTestCase): - '''Test to exercise method: DataSourceAltCloud.get_cloud_type()''' - - with_logs = True - - def setUp(self): - '''Set up.''' - super(TestGetCloudType, self).setUp() - self.tmp = self.tmp_dir() - self.paths = helpers.Paths({'cloud_dir': self.tmp}) - self.dmi_data = dmi.read_dmi_data - # We have a different code path for arm to deal with LP1243287 - # We have to switch arch to x86_64 to avoid test failure - force_arch('x86_64') - - def tearDown(self): - # Reset - dmi.read_dmi_data = self.dmi_data - force_arch() - - def test_cloud_info_file_ioerror(self): - """Return UNKNOWN when /etc/sysconfig/cloud-info exists but errors.""" - self.assertEqual('/etc/sysconfig/cloud-info', dsac.CLOUD_INFO_FILE) - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - # Attempting to read the directory generates IOError - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.tmp): - self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) - self.assertIn( - "[Errno 21] Is a directory: '%s'" % self.tmp, - self.logs.getvalue()) - - def test_cloud_info_file(self): - """Return uppercase stripped content from /etc/sysconfig/cloud-info.""" - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - cloud_info = self.tmp_path('cloud-info', dir=self.tmp) - util.write_file(cloud_info, ' OverRiDdeN CloudType ') - # Attempting to read the directory generates IOError - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', cloud_info): - self.assertEqual('OVERRIDDEN CLOUDTYPE', dsrc.get_cloud_type()) - - def test_rhev(self): - ''' - Test method get_cloud_type() for RHEVm systems. - Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor - ''' - dmi.read_dmi_data = _dmi_data('RHEV') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual('RHEV', dsrc.get_cloud_type()) - - def test_vsphere(self): - ''' - Test method get_cloud_type() for vSphere systems. - Forcing read_dmi_data return to match a vSphere system: RHEV Hypervisor - ''' - dmi.read_dmi_data = _dmi_data('VMware Virtual Platform') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual('VSPHERE', dsrc.get_cloud_type()) - - def test_unknown(self): - ''' - Test method get_cloud_type() for unknown systems. - Forcing read_dmi_data return to match an unrecognized return. 
- ''' - dmi.read_dmi_data = _dmi_data('Unrecognized Platform') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual('UNKNOWN', dsrc.get_cloud_type()) - - -class TestGetDataCloudInfoFile(CiTestCase): - ''' - Test to exercise method: DataSourceAltCloud.get_data() - With a contrived CLOUD_INFO_FILE - ''' - def setUp(self): - '''Set up.''' - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - self.cloud_info_file = self.tmp_path('cloud-info', dir=self.tmp) - - def test_rhev(self): - '''Success Test module get_data() forcing RHEV.''' - - util.write_file(self.cloud_info_file, 'RHEV') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda: True - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(True, dsrc.get_data()) - self.assertEqual('altcloud', dsrc.cloud_name) - self.assertEqual('altcloud', dsrc.platform_type) - self.assertEqual('rhev (/dev/fd0)', dsrc.subplatform) - - def test_vsphere(self): - '''Success Test module get_data() forcing VSPHERE.''' - - util.write_file(self.cloud_info_file, 'VSPHERE') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda: True - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(True, dsrc.get_data()) - self.assertEqual('altcloud', dsrc.cloud_name) - self.assertEqual('altcloud', dsrc.platform_type) - self.assertEqual('vsphere (unknown)', dsrc.subplatform) - - def test_fail_rhev(self): - '''Failure Test module get_data() forcing RHEV.''' - - util.write_file(self.cloud_info_file, 'RHEV') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda: False - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(False, dsrc.get_data()) - - def test_fail_vsphere(self): - '''Failure Test module get_data() forcing VSPHERE.''' - - util.write_file(self.cloud_info_file, 'VSPHERE') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_vsphere = lambda: False - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(False, dsrc.get_data()) - - def test_unrecognized(self): - '''Failure Test module get_data() forcing unrecognized.''' - - util.write_file(self.cloud_info_file, 'unrecognized') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - with mock.patch.object(dsac, 'CLOUD_INFO_FILE', self.cloud_info_file): - self.assertEqual(False, dsrc.get_data()) - - -class TestGetDataNoCloudInfoFile(CiTestCase): - ''' - Test to exercise method: DataSourceAltCloud.get_data() - Without a CLOUD_INFO_FILE - ''' - def setUp(self): - '''Set up.''' - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - self.dmi_data = dmi.read_dmi_data - dsac.CLOUD_INFO_FILE = \ - 'no such file' - # We have a different code path for arm to deal with LP1243287 - # We have to switch arch to x86_64 to avoid test failure - force_arch('x86_64') - - def tearDown(self): - # Reset - dsac.CLOUD_INFO_FILE = \ - '/etc/sysconfig/cloud-info' - dmi.read_dmi_data = self.dmi_data - # Return back to original arch - force_arch() - - def test_rhev_no_cloud_file(self): - '''Test No cloud info file module get_data() forcing RHEV.''' - - dmi.read_dmi_data = _dmi_data('RHEV Hypervisor') - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - dsrc.user_data_rhevm = lambda: True - self.assertEqual(True, dsrc.get_data()) - - def 
test_vsphere_no_cloud_file(self):
-        '''Test No cloud info file module get_data() forcing VSPHERE.'''
-
-        dmi.read_dmi_data = _dmi_data('VMware Virtual Platform')
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        dsrc.user_data_vsphere = lambda: True
-        self.assertEqual(True, dsrc.get_data())
-
-    def test_failure_no_cloud_file(self):
-        '''Test No cloud info file module get_data() forcing unrecognized.'''
-
-        dmi.read_dmi_data = _dmi_data('Unrecognized Platform')
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.get_data())
-
-
-class TestUserDataRhevm(CiTestCase):
-    '''
-    Test to exercise method: DataSourceAltCloud.user_data_rhevm()
-    '''
-    def setUp(self):
-        '''Set up.'''
-        self.paths = helpers.Paths({'cloud_dir': '/tmp'})
-        self.mount_dir = self.tmp_dir()
-        _write_user_data_files(self.mount_dir, 'test user data')
-        self.add_patch(
-            'cloudinit.sources.DataSourceAltCloud.modprobe_floppy',
-            'm_modprobe_floppy', return_value=None)
-        self.add_patch(
-            'cloudinit.sources.DataSourceAltCloud.util.udevadm_settle',
-            'm_udevadm_settle', return_value=('', ''))
-        self.add_patch(
-            'cloudinit.sources.DataSourceAltCloud.util.mount_cb',
-            'm_mount_cb')
-
-    def test_mount_cb_fails(self):
-        '''Test user_data_rhevm() where mount_cb fails.'''
-
-        self.m_mount_cb.side_effect = util.MountFailedError("Failed Mount")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-    def test_modprobe_fails(self):
-        '''Test user_data_rhevm() where modprobe fails.'''
-
-        self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
-            "Failed modprobe")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-    def test_no_modprobe_cmd(self):
-        '''Test user_data_rhevm() with no modprobe command.'''
-
-        self.m_modprobe_floppy.side_effect = subp.ProcessExecutionError(
-            "No such file or dir")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-    def test_udevadm_fails(self):
-        '''Test user_data_rhevm() where udevadm fails.'''
-
-        self.m_udevadm_settle.side_effect = subp.ProcessExecutionError(
-            "Failed settle.")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-    def test_no_udevadm_cmd(self):
-        '''Test user_data_rhevm() with no udevadm command.'''
-
-        self.m_udevadm_settle.side_effect = OSError("No such file or dir")
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-        self.assertEqual(False, dsrc.user_data_rhevm())
-
-
-class TestUserDataVsphere(CiTestCase):
-    '''
-    Test to exercise method: DataSourceAltCloud.user_data_vsphere()
-    '''
-    def setUp(self):
-        '''Set up.'''
-        self.tmp = self.tmp_dir()
-        self.paths = helpers.Paths({'cloud_dir': self.tmp})
-        self.mount_dir = tempfile.mkdtemp()
-
-        _write_user_data_files(self.mount_dir, 'test user data')
-
-    def tearDown(self):
-        # Reset
-
-        _remove_user_data_files(self.mount_dir)
-
-        # Attempt to remove the temp dir ignoring errors
-        try:
-            shutil.rmtree(self.mount_dir)
-        except OSError:
-            pass
-
-        dsac.CLOUD_INFO_FILE = \
-            '/etc/sysconfig/cloud-info'
-
-    @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with")
-    @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb")
-    def test_user_data_vsphere_no_cdrom(self, m_mount_cb, m_find_devs_with):
-        '''Test user_data_vsphere() when no CDROM device is found.'''
-
-        m_mount_cb.return_value = []
-        dsrc = dsac.DataSourceAltCloud({}, None, self.paths)
-
self.assertEqual(False, dsrc.user_data_vsphere()) - self.assertEqual(0, m_mount_cb.call_count) - - @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") - @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") - def test_user_data_vsphere_mcb_fail(self, m_mount_cb, m_find_devs_with): - '''Test user_data_vsphere() where mount_cb fails.''' - - m_find_devs_with.return_value = ["/dev/mock/cdrom"] - m_mount_cb.side_effect = util.MountFailedError("Unable To mount") - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - self.assertEqual(False, dsrc.user_data_vsphere()) - self.assertEqual(1, m_find_devs_with.call_count) - self.assertEqual(1, m_mount_cb.call_count) - - @mock.patch("cloudinit.sources.DataSourceAltCloud.util.find_devs_with") - @mock.patch("cloudinit.sources.DataSourceAltCloud.util.mount_cb") - def test_user_data_vsphere_success(self, m_mount_cb, m_find_devs_with): - """Test user_data_vsphere() where successful.""" - m_find_devs_with.return_value = ["/dev/mock/cdrom"] - m_mount_cb.return_value = 'raw userdata from cdrom' - dsrc = dsac.DataSourceAltCloud({}, None, self.paths) - cloud_info = self.tmp_path('cloud-info', dir=self.tmp) - util.write_file(cloud_info, 'VSPHERE') - self.assertEqual(True, dsrc.user_data_vsphere()) - m_find_devs_with.assert_called_once_with('LABEL=CDROM') - m_mount_cb.assert_called_once_with( - '/dev/mock/cdrom', dsac.read_user_data_callback) - with mock.patch.object(dsrc, 'get_cloud_type', return_value='VSPHERE'): - self.assertEqual('vsphere (/dev/mock/cdrom)', dsrc.subplatform) - - -class TestReadUserDataCallback(CiTestCase): - ''' - Test to exercise method: DataSourceAltCloud.read_user_data_callback() - ''' - def setUp(self): - '''Set up.''' - self.paths = helpers.Paths({'cloud_dir': '/tmp'}) - self.mount_dir = tempfile.mkdtemp() - - _write_user_data_files(self.mount_dir, 'test user data') - - def tearDown(self): - # Reset - - _remove_user_data_files(self.mount_dir) - - # Attempt to remove the temp dir ignoring errors - try: - shutil.rmtree(self.mount_dir) - except OSError: - pass - - def test_callback_both(self): - '''Test read_user_data_callback() with both files.''' - - self.assertEqual('test user data', - dsac.read_user_data_callback(self.mount_dir)) - - def test_callback_dc(self): - '''Test read_user_data_callback() with only DC file.''' - - _remove_user_data_files(self.mount_dir, - dc_file=False, - non_dc_file=True) - - self.assertEqual('test user data', - dsac.read_user_data_callback(self.mount_dir)) - - def test_callback_non_dc(self): - '''Test read_user_data_callback() with only non-DC file.''' - - _remove_user_data_files(self.mount_dir, - dc_file=True, - non_dc_file=False) - - self.assertEqual('test user data', - dsac.read_user_data_callback(self.mount_dir)) - - def test_callback_none(self): - '''Test read_user_data_callback() no files are found.''' - - _remove_user_data_files(self.mount_dir) - self.assertIsNone(dsac.read_user_data_callback(self.mount_dir)) - - -def force_arch(arch=None): - - def _os_uname(): - return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', arch) - - if arch: - setattr(os, 'uname', _os_uname) - elif arch is None: - setattr(os, 'uname', OS_UNAME_ORIG) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py deleted file mode 100644 index 995d2b10..00000000 --- a/tests/unittests/test_datasource/test_azure.py +++ /dev/null @@ -1,3394 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
-
-from cloudinit import distros
-from cloudinit import helpers
-from cloudinit import url_helper
-from cloudinit.sources import (
-    UNSET, DataSourceAzure as dsaz, InvalidMetaDataException)
-from cloudinit.util import (b64e, decode_binary, load_file, write_file,
-                            MountFailedError, json_dumps, load_json)
-from cloudinit.version import version_string as vs
-from cloudinit.tests.helpers import (
-    HttprettyTestCase, CiTestCase, populate_dir, mock, wrap_and_call,
-    ExitStack, resourceLocation)
-from cloudinit.sources.helpers import netlink
-
-import copy
-import crypt
-import httpretty
-import json
-import os
-import requests
-import stat
-import xml.etree.ElementTree as ET
-import yaml
-
-
-def construct_valid_ovf_env(data=None, pubkeys=None,
-                            userdata=None, platform_settings=None):
-    if data is None:
-        data = {'HostName': 'FOOHOST'}
-    if pubkeys is None:
-        pubkeys = {}
-
-    content = """<?xml version="1.0" encoding="utf-8"?>
-<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
- xmlns:wa="http://schemas.microsoft.com/windowsazure"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
-   <wa:ProvisioningSection><wa:Version>1.0</wa:Version>
-   <LinuxProvisioningConfigurationSet
-      xmlns="http://schemas.microsoft.com/windowsazure"
-      xmlns:i="http://www.w3.org/2001/XMLSchema-instance">
-    <ConfigurationSetType>LinuxProvisioningConfiguration
-    </ConfigurationSetType>
-    """
-    for key, dval in data.items():
-        if isinstance(dval, dict):
-            val = dict(dval).get('text')
-            attrs = ' ' + ' '.join(["%s='%s'" % (k, v) for k, v
-                                    in dict(dval).items() if k != 'text'])
-        else:
-            val = dval
-            attrs = ""
-        content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
-
-    if userdata:
-        content += "<UserData>%s</UserData>\n" % (b64e(userdata))
-
-    if pubkeys:
-        content += "<SSH><PublicKeys>\n"
-        for fp, path, value in pubkeys:
-            content += " <PublicKey>"
-            if fp and path:
-                content += ("<Fingerprint>%s</Fingerprint>"
-                            "<Path>%s</Path>" % (fp, path))
-            if value:
-                content += "<Value>%s</Value>" % value
-            content += "</PublicKey>\n"
-        content += "</PublicKeys></SSH>"
-    content += """
- </LinuxProvisioningConfigurationSet>
- </wa:ProvisioningSection>
- <wa:PlatformSettingsSection><wa:Version>1.0</wa:Version>
- <PlatformSettings>
-    <KmsServerHostname>kms.core.windows.net</KmsServerHostname>
-    <ProvisionGuestAgent>false</ProvisionGuestAgent>
-    <GuestAgentPackageName i:nil="true" />"""
-    if platform_settings:
-        for k, v in platform_settings.items():
-            content += "<%s>%s</%s>\n" % (k, v, k)
-        if "PreprovisionedVMType" not in platform_settings:
-            content += """<PreprovisionedVMType i:nil="true" />"""
-    content += """</PlatformSettings></wa:PlatformSettingsSection>
-</Environment>
-"""
-
-    return content
-
-
-NETWORK_METADATA = {
-    "compute": {
-        "location": "eastus2",
-        "name": "my-hostname",
-        "offer": "UbuntuServer",
-        "osType": "Linux",
-        "placementGroupId": "",
-        "platformFaultDomain": "0",
-        "platformUpdateDomain": "0",
-        "publisher": "Canonical",
-        "resourceGroupName": "srugroup1",
-        "sku": "19.04-DAILY",
-        "subscriptionId": "12aad61c-6de4-4e53-a6c6-5aff52a83777",
-        "tags": "",
-        "version": "19.04.201906190",
-        "vmId": "ff702a6b-cb6a-4fcd-ad68-b4ce38227642",
-        "vmScaleSetName": "",
-        "vmSize": "Standard_DS1_v2",
-        "zone": "",
-        "publicKeys": [
-            {
-                "keyData": "ssh-rsa key1",
-                "path": "path1"
-            }
-        ]
-    },
-    "network": {
-        "interface": [
-            {
-                "macAddress": "000D3A047598",
-                "ipv6": {
-                    "ipAddress": []
-                },
-                "ipv4": {
-                    "subnet": [
-                        {
-                            "prefix": "24",
-                            "address": "10.0.0.0"
-                        }
-                    ],
-                    "ipAddress": [
-                        {
-                            "privateIpAddress": "10.0.0.4",
-                            "publicIpAddress": "104.46.124.81"
-                        }
-                    ]
-                }
-            }
-        ]
-    }
-}
-
-SECONDARY_INTERFACE = {
-    "macAddress": "220D3A047598",
-    "ipv6": {
-        "ipAddress": []
-    },
-    "ipv4": {
-        "subnet": [
-            {
-                "prefix": "24",
-                "address": "10.0.1.0"
-            }
-        ],
-        "ipAddress": [
-            {
-                "privateIpAddress": "10.0.1.5",
-            }
-        ]
-    }
-}
-
-SECONDARY_INTERFACE_NO_IP = {
-    "macAddress": "220D3A047598",
-    "ipv6": {
-        "ipAddress": []
-    },
-    "ipv4": {
-        "subnet": [
-            {
-                "prefix": "24",
-                "address": "10.0.1.0"
-            }
-        ],
-        "ipAddress": []
-    }
-}
-
-IMDS_NETWORK_METADATA = {
-    "interface": [
-        {
-            "macAddress": "000D3A047598",
-            "ipv6": {
-                "ipAddress": []
-            },
-            "ipv4": {
-                "subnet": [
-                    {
-                        "prefix": "24",
-                        "address": "10.0.0.0"
-                    }
-                ],
-                "ipAddress": [
-                    {
-                        "privateIpAddress": "10.0.0.4",
-                        "publicIpAddress": "104.46.124.81"
-                    }
-                ]
-            }
-        }
-    ]
-}
-
-MOCKPATH = 'cloudinit.sources.DataSourceAzure.'
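
[Editorial sketch, not part of the original patch.] The fixtures above are the two inputs these tests feed the Azure datasource: construct_valid_ovf_env() renders an ovf-env.xml provisioning document, while NETWORK_METADATA and the SECONDARY_INTERFACE variants model IMDS JSON responses. A minimal usage sketch, assuming only the helpers defined above (host name, user name, and user-data values here are illustrative):

    import copy

    # Render an OVF environment for a preprovisioned VM carrying user-data;
    # construct_valid_ovf_env() base64-encodes the user-data into <UserData>.
    ovf = construct_valid_ovf_env(
        data={'HostName': 'myhost', 'UserName': 'myuser'},
        userdata='#cloud-config\n{}',
        platform_settings={'PreprovisionedVm': 'True'})

    # Model a VM that gained a second NIC. Tests deep-copy the module-level
    # fixtures so a mutated interface list never leaks between test cases.
    imds = copy.deepcopy(NETWORK_METADATA)
    imds['network']['interface'].append(copy.deepcopy(SECONDARY_INTERFACE))
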
-EXAMPLE_UUID = 'd0df4c54-4ecb-4a4b-9954-5bdf3ed5c3b8' - - -class TestParseNetworkConfig(CiTestCase): - - maxDiff = None - fallback_config = { - 'version': 1, - 'config': [{ - 'type': 'physical', 'name': 'eth0', - 'mac_address': '00:11:22:33:44:55', - 'params': {'driver': 'hv_netsvc'}, - 'subnets': [{'type': 'dhcp'}], - }] - } - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_single_ipv4_nic_configuration(self, m_driver): - """parse_network_config emits dhcp on single nic with ipv4""" - expected = {'ethernets': { - 'eth0': {'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, 'version': 2} - self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_increases_route_metric_for_non_primary_nics(self, m_driver): - """parse_network_config increases route-metric for each nic""" - expected = {'ethernets': { - 'eth0': {'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}, - 'eth1': {'set-name': 'eth1', - 'match': {'macaddress': '22:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 200}}, - 'eth2': {'set-name': 'eth2', - 'match': {'macaddress': '33:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 300}}}, 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['network']['interface'].append(SECONDARY_INTERFACE) - third_intf = copy.deepcopy(SECONDARY_INTERFACE) - third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') - third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' - third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' - imds_data['network']['interface'].append(third_intf) - self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_ipv4_and_ipv6_route_metrics_match_for_nics(self, m_driver): - """parse_network_config emits matching ipv4 and ipv6 route-metrics.""" - expected = {'ethernets': { - 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/128'], - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 100}, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}, - 'eth1': {'set-name': 'eth1', - 'match': {'macaddress': '22:0d:3a:04:75:98'}, - 'dhcp4': True, - 'dhcp6': False, - 'dhcp4-overrides': {'route-metric': 200}}, - 'eth2': {'set-name': 'eth2', - 'match': {'macaddress': '33:0d:3a:04:75:98'}, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 300}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 300}}}, 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - nic1 = imds_data['network']['interface'][0] - nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) - - nic1['ipv6'] = { - "subnet": [{"address": "2001:dead:beef::16"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, - {"privateIpAddress": "2001:dead:beef::2"}] - } - imds_data['network']['interface'].append(SECONDARY_INTERFACE) - third_intf = copy.deepcopy(SECONDARY_INTERFACE) - third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') - third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' - 
third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' - third_intf['ipv6'] = { - "subnet": [{"prefix": "64", "address": "2001:dead:beef::2"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] - } - imds_data['network']['interface'].append(third_intf) - self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_ipv4_secondary_ips_will_be_static_addrs(self, m_driver): - """parse_network_config emits primary ipv4 as dhcp others are static""" - expected = {'ethernets': { - 'eth0': {'addresses': ['10.0.0.5/24'], - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 100}, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - nic1 = imds_data['network']['interface'][0] - nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) - - nic1['ipv6'] = { - "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}] - } - self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_ipv6_secondary_ips_will_be_static_cidrs(self, m_driver): - """parse_network_config emits primary ipv6 as dhcp others are static""" - expected = {'ethernets': { - 'eth0': {'addresses': ['10.0.0.5/24', '2001:dead:beef::2/10'], - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, - 'dhcp6-overrides': {'route-metric': 100}, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - nic1 = imds_data['network']['interface'][0] - nic1['ipv4']['ipAddress'].append({'privateIpAddress': '10.0.0.5'}) - - # Secondary ipv6 addresses currently ignored/unconfigured - nic1['ipv6'] = { - "subnet": [{"prefix": "10", "address": "2001:dead:beef::16"}], - "ipAddress": [{"privateIpAddress": "2001:dead:beef::1"}, - {"privateIpAddress": "2001:dead:beef::2"}] - } - self.assertEqual(expected, dsaz.parse_network_config(imds_data)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value='hv_netvsc') - def test_match_driver_for_netvsc(self, m_driver): - """parse_network_config emits driver when using netvsc.""" - expected = {'ethernets': { - 'eth0': { - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': { - 'macaddress': '00:0d:3a:04:75:98', - 'driver': 'hv_netvsc', - }, - 'set-name': 'eth0' - }}, 'version': 2} - self.assertEqual(expected, dsaz.parse_network_config(NETWORK_METADATA)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - @mock.patch('cloudinit.net.generate_fallback_config') - def test_parse_network_config_uses_fallback_cfg_when_no_network_metadata( - self, m_fallback_config, m_driver): - """parse_network_config generates fallback network config when the - IMDS instance metadata is corrupted/invalid, such as when - network metadata is not present. 
- """ - imds_metadata_missing_network_metadata = copy.deepcopy( - NETWORK_METADATA) - del imds_metadata_missing_network_metadata['network'] - m_fallback_config.return_value = self.fallback_config - self.assertEqual( - self.fallback_config, - dsaz.parse_network_config( - imds_metadata_missing_network_metadata)) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - @mock.patch('cloudinit.net.generate_fallback_config') - def test_parse_network_config_uses_fallback_cfg_when_no_interface_metadata( - self, m_fallback_config, m_driver): - """parse_network_config generates fallback network config when the - IMDS instance metadata is corrupted/invalid, such as when - network interface metadata is not present. - """ - imds_metadata_missing_interface_metadata = copy.deepcopy( - NETWORK_METADATA) - del imds_metadata_missing_interface_metadata['network']['interface'] - m_fallback_config.return_value = self.fallback_config - self.assertEqual( - self.fallback_config, - dsaz.parse_network_config( - imds_metadata_missing_interface_metadata)) - - -class TestGetMetadataFromIMDS(HttprettyTestCase): - - with_logs = True - - def setUp(self): - super(TestGetMetadataFromIMDS, self).setUp() - self.network_md_url = "{}/instance?api-version=2019-06-01".format( - dsaz.IMDS_URL - ) - - @mock.patch(MOCKPATH + 'readurl') - @mock.patch(MOCKPATH + 'EphemeralDHCPv4', autospec=True) - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) - def test_get_metadata_does_not_dhcp_if_network_is_up( - self, m_net_is_up, m_dhcp, m_readurl): - """Do not perform DHCP setup when nic is already up.""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(NETWORK_METADATA).encode('utf-8')) - self.assertEqual( - NETWORK_METADATA, - dsaz.get_metadata_from_imds('eth9', retries=3)) - - m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_not_called() - self.assertIn( - "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') - def test_get_metadata_uses_instance_url( - self, m_net_is_up, m_dhcp, m_readurl): - """Make sure readurl is called with the correct url when accessing - metadata""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) - - dsaz.get_metadata_from_imds( - 'eth0', retries=3, md_type=dsaz.metadata_type.all) - m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') - def test_get_network_metadata_uses_network_url( - self, m_net_is_up, m_dhcp, m_readurl): - """Make sure readurl is called with the correct url when accessing - network metadata""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) - - dsaz.get_metadata_from_imds( - 'eth0', retries=3, md_type=dsaz.metadata_type.network) - m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance/network?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', 
autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') - def test_get_default_metadata_uses_instance_url( - self, m_net_is_up, m_dhcp, m_readurl): - """Make sure readurl is called with the correct url when accessing - metadata""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) - - dsaz.get_metadata_from_imds( - 'eth0', retries=3) - m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance?api-version=" - "2019-06-01", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4') - @mock.patch(MOCKPATH + 'net.is_up') - def test_get_metadata_uses_extended_url( - self, m_net_is_up, m_dhcp, m_readurl): - """Make sure readurl is called with the correct url when accessing - metadata""" - m_net_is_up.return_value = True - m_readurl.return_value = url_helper.StringResponse( - json.dumps(IMDS_NETWORK_METADATA).encode('utf-8')) - - dsaz.get_metadata_from_imds( - 'eth0', retries=3, md_type=dsaz.metadata_type.all, - api_version="2021-08-01") - m_readurl.assert_called_with( - "http://169.254.169.254/metadata/instance?api-version=" - "2021-08-01&extended=true", exception_cb=mock.ANY, - headers=mock.ANY, retries=mock.ANY, - timeout=mock.ANY, infinite=False) - - @mock.patch(MOCKPATH + 'readurl', autospec=True) - @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting', autospec=True) - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) - def test_get_metadata_performs_dhcp_when_network_is_down( - self, m_net_is_up, m_dhcp, m_readurl): - """Perform DHCP setup when nic is not up.""" - m_net_is_up.return_value = False - m_readurl.return_value = url_helper.StringResponse( - json.dumps(NETWORK_METADATA).encode('utf-8')) - - self.assertEqual( - NETWORK_METADATA, - dsaz.get_metadata_from_imds('eth9', retries=2)) - - m_net_is_up.assert_called_with('eth9') - m_dhcp.assert_called_with(mock.ANY, 'eth9') - self.assertIn( - "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) - - m_readurl.assert_called_with( - self.network_md_url, exception_cb=mock.ANY, - headers={'Metadata': 'true'}, retries=2, - timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, infinite=False) - - @mock.patch('cloudinit.url_helper.time.sleep') - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) - def test_get_metadata_from_imds_empty_when_no_imds_present( - self, m_net_is_up, m_sleep): - """Return empty dict when IMDS network metadata is absent.""" - httpretty.register_uri( - httpretty.GET, - dsaz.IMDS_URL + '/instance?api-version=2017-12-01', - body={}, status=404) - - m_net_is_up.return_value = True # skips dhcp - - self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=2)) - - m_net_is_up.assert_called_with('eth9') - self.assertEqual([mock.call(1), mock.call(1)], m_sleep.call_args_list) - self.assertIn( - "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) - - @mock.patch('requests.Session.request') - @mock.patch('cloudinit.url_helper.time.sleep') - @mock.patch(MOCKPATH + 'net.is_up', autospec=True) - def test_get_metadata_from_imds_retries_on_timeout( - self, m_net_is_up, m_sleep, m_request): - """Retry IMDS network metadata on timeout errors.""" - - self.attempt = 0 - m_request.side_effect = requests.Timeout('Fake Connection Timeout') - - def retry_callback(request, uri, headers): - 
self.attempt += 1 - raise requests.Timeout('Fake connection timeout') - - httpretty.register_uri( - httpretty.GET, - dsaz.IMDS_URL + 'instance?api-version=2017-12-01', - body=retry_callback) - - m_net_is_up.return_value = True # skips dhcp - - self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3)) - - m_net_is_up.assert_called_with('eth9') - self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list) - self.assertIn( - "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time - self.logs.getvalue()) - - -class TestAzureDataSource(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestAzureDataSource, self).setUp() - self.tmp = self.tmp_dir() - - # patch cloud_dir, so our 'seed_dir' is guaranteed empty - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') - - self.patches = ExitStack() - self.addCleanup(self.patches.close) - - self.patches.enter_context(mock.patch.object( - dsaz, '_get_random_seed', return_value='wild')) - self.m_get_metadata_from_imds = self.patches.enter_context( - mock.patch.object( - dsaz, 'get_metadata_from_imds', - mock.MagicMock(return_value=NETWORK_METADATA))) - self.m_fallback_nic = self.patches.enter_context( - mock.patch('cloudinit.sources.net.find_fallback_nic', - return_value='eth9')) - self.m_remove_ubuntu_network_scripts = self.patches.enter_context( - mock.patch.object( - dsaz, 'maybe_remove_ubuntu_network_config_scripts', - mock.MagicMock())) - super(TestAzureDataSource, self).setUp() - - def apply_patches(self, patches): - for module, name, new in patches: - self.patches.enter_context(mock.patch.object(module, name, new)) - - def _get_mockds(self): - sysctl_out = "dev.storvsc.3.%pnpinfo: "\ - "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ - "deviceid=f8b3781b-1e82-4818-a1c3-63d806ec15bb\n" - sysctl_out += "dev.storvsc.2.%pnpinfo: "\ - "classid=ba6163d9-04a1-4d29-b605-72e2ffb1dc7f "\ - "deviceid=f8b3781a-1e82-4818-a1c3-63d806ec15bb\n" - sysctl_out += "dev.storvsc.1.%pnpinfo: "\ - "classid=32412632-86cb-44a2-9b5c-50d1417354f5 "\ - "deviceid=00000000-0001-8899-0000-000000000000\n" - camctl_devbus = """ -scbus0 on ata0 bus 0 -scbus1 on ata1 bus 0 -scbus2 on blkvsc0 bus 0 -scbus3 on blkvsc1 bus 0 -scbus4 on storvsc2 bus 0 -scbus5 on storvsc3 bus 0 -scbus-1 on xpt0 bus 0 - """ - camctl_dev = """ - at scbus1 target 0 lun 0 (cd0,pass0) - at scbus2 target 0 lun 0 (da0,pass1) - at scbus3 target 1 lun 0 (da1,pass2) - """ - self.apply_patches([ - (dsaz, 'get_dev_storvsc_sysctl', mock.MagicMock( - return_value=sysctl_out)), - (dsaz, 'get_camcontrol_dev_bus', mock.MagicMock( - return_value=camctl_devbus)), - (dsaz, 'get_camcontrol_dev', mock.MagicMock( - return_value=camctl_dev)) - ]) - return dsaz - - def _get_ds(self, data, distro='ubuntu', - apply_network=None, instance_id=None): - - def _wait_for_files(flist, _maxwait=None, _naplen=None): - data['waited'] = flist - return [] - - def _load_possible_azure_ds(seed_dir, cache_dir): - yield seed_dir - yield dsaz.DEFAULT_PROVISIONING_ISO_DEV - yield from data.get('dsdevs', []) - if cache_dir: - yield cache_dir - - seed_dir = os.path.join(self.paths.seed_dir, "azure") - if data.get('ovfcontent') is not None: - populate_dir(seed_dir, - {'ovf-env.xml': data['ovfcontent']}) - - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - - self.m_is_platform_viable = mock.MagicMock(autospec=True) - self.m_get_metadata_from_fabric = mock.MagicMock( - return_value={'public-keys': []}) - 
self.m_report_failure_to_fabric = mock.MagicMock(autospec=True) - self.m_ephemeral_dhcpv4 = mock.MagicMock() - self.m_ephemeral_dhcpv4_with_reporting = mock.MagicMock() - self.m_list_possible_azure_ds = mock.MagicMock( - side_effect=_load_possible_azure_ds) - - if instance_id: - self.instance_id = instance_id - else: - self.instance_id = EXAMPLE_UUID - - def _dmi_mocks(key): - if key == 'system-uuid': - return self.instance_id - elif key == 'chassis-asset-tag': - return '7783-7084-3265-9085-8269-3286-77' - - self.apply_patches([ - (dsaz, 'list_possible_azure_ds', - self.m_list_possible_azure_ds), - (dsaz, 'perform_hostname_bounce', mock.MagicMock()), - (dsaz, 'get_hostname', mock.MagicMock()), - (dsaz, 'set_hostname', mock.MagicMock()), - (dsaz, '_is_platform_viable', - self.m_is_platform_viable), - (dsaz, 'get_metadata_from_fabric', - self.m_get_metadata_from_fabric), - (dsaz, 'report_failure_to_fabric', - self.m_report_failure_to_fabric), - (dsaz, 'EphemeralDHCPv4', self.m_ephemeral_dhcpv4), - (dsaz, 'EphemeralDHCPv4WithReporting', - self.m_ephemeral_dhcpv4_with_reporting), - (dsaz, 'get_boot_telemetry', mock.MagicMock()), - (dsaz, 'get_system_info', mock.MagicMock()), - (dsaz.subp, 'which', lambda x: True), - (dsaz.dmi, 'read_dmi_data', mock.MagicMock( - side_effect=_dmi_mocks)), - (dsaz.util, 'wait_for_files', mock.MagicMock( - side_effect=_wait_for_files)), - ]) - - if isinstance(distro, str): - distro_cls = distros.fetch(distro) - distro = distro_cls(distro, data.get('sys_cfg', {}), self.paths) - dsrc = dsaz.DataSourceAzure( - data.get('sys_cfg', {}), distro=distro, paths=self.paths) - if apply_network is not None: - dsrc.ds_cfg['apply_network_config'] = apply_network - - return dsrc - - def _get_and_setup(self, dsrc): - ret = dsrc.get_data() - if ret: - dsrc.setup(True) - return ret - - def xml_equals(self, oxml, nxml): - """Compare two sets of XML to make sure they are equal""" - - def create_tag_index(xml): - et = ET.fromstring(xml) - ret = {} - for x in et.iter(): - ret[x.tag] = x - return ret - - def tags_exists(x, y): - for tag in x.keys(): - assert tag in y - for tag in y.keys(): - assert tag in x - - def tags_equal(x, y): - for x_val in x.values(): - y_val = y.get(x_val.tag) - assert x_val.text == y_val.text - - old_cnt = create_tag_index(oxml) - new_cnt = create_tag_index(nxml) - tags_exists(old_cnt, new_cnt) - tags_equal(old_cnt, new_cnt) - - def xml_notequals(self, oxml, nxml): - try: - self.xml_equals(oxml, nxml) - except AssertionError: - return - raise AssertionError("XML is the same") - - def test_get_resource_disk(self): - ds = self._get_mockds() - dev = ds.get_resource_disk_on_freebsd(1) - self.assertEqual("da1", dev) - - def test_not_is_platform_viable_seed_should_return_no_datasource(self): - """Check seed_dir using _is_platform_viable and return False.""" - # Return a non-matching asset tag value - data = {} - dsrc = self._get_ds(data) - self.m_is_platform_viable.return_value = False - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_report_failure') as m_report_failure: - ret = dsrc.get_data() - self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) - self.assertFalse(ret) - # Assert that for non viable platforms, - # there is no communication with the Azure datasource. 
- self.assertEqual( - 0, - m_crawl_metadata.call_count) - self.assertEqual( - 0, - m_report_failure.call_count) - - def test_platform_viable_but_no_devs_should_return_no_datasource(self): - """For platforms where the Azure platform is viable - (which is indicated by the matching asset tag), - the absence of any devs at all (devs == candidate sources - for crawling Azure datasource) is NOT expected. - Report failure to Azure as this is an unexpected fatal error. - """ - data = {} - dsrc = self._get_ds(data) - with mock.patch.object(dsrc, '_report_failure') as m_report_failure: - self.m_is_platform_viable.return_value = True - ret = dsrc.get_data() - self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) - self.assertFalse(ret) - self.assertEqual( - 1, - m_report_failure.call_count) - - def test_crawl_metadata_exception_returns_no_datasource(self): - data = {} - dsrc = self._get_ds(data) - self.m_is_platform_viable.return_value = True - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - m_crawl_metadata.side_effect = Exception - ret = dsrc.get_data() - self.m_is_platform_viable.assert_called_with(dsrc.seed_dir) - self.assertEqual( - 1, - m_crawl_metadata.call_count) - self.assertFalse(ret) - - def test_crawl_metadata_exception_should_report_failure_with_msg(self): - data = {} - dsrc = self._get_ds(data) - self.m_is_platform_viable.return_value = True - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_report_failure') as m_report_failure: - m_crawl_metadata.side_effect = Exception - dsrc.get_data() - self.assertEqual( - 1, - m_crawl_metadata.call_count) - m_report_failure.assert_called_once_with( - description=dsaz.DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) - - def test_crawl_metadata_exc_should_log_could_not_crawl_msg(self): - data = {} - dsrc = self._get_ds(data) - self.m_is_platform_viable.return_value = True - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - m_crawl_metadata.side_effect = Exception - dsrc.get_data() - self.assertEqual( - 1, - m_crawl_metadata.call_count) - self.assertIn( - "Could not crawl Azure metadata", - self.logs.getvalue()) - - def test_basic_seed_dir(self): - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, "") - self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) - self.assertTrue(os.path.isfile( - os.path.join(self.waagent_d, 'ovf-env.xml'))) - self.assertEqual('azure', dsrc.cloud_name) - self.assertEqual('azure', dsrc.platform_type) - self.assertEqual( - 'seed-dir (%s/seed/azure)' % self.tmp, dsrc.subplatform) - - def test_basic_dev_file(self): - """When a device path is used, present that in subplatform.""" - data = {'sys_cfg': {}, 'dsdevs': ['/dev/cd0']} - dsrc = self._get_ds(data) - # DSAzure will attempt to mount /dev/sr0 first, which should - # fail with mount error since the list of devices doesn't have - # /dev/sr0 - with mock.patch(MOCKPATH + 'util.mount_cb') as m_mount_cb: - m_mount_cb.side_effect = [ - MountFailedError("fail"), - ({'local-hostname': 'me'}, 'ud', {'cfg': ''}, {}) - ] - self.assertTrue(dsrc.get_data()) - self.assertEqual(dsrc.userdata_raw, 'ud') - self.assertEqual(dsrc.metadata['local-hostname'], 'me') - self.assertEqual('azure', dsrc.cloud_name) - self.assertEqual('azure', dsrc.platform_type) - self.assertEqual('config-disk 
(/dev/cd0)', dsrc.subplatform) - - def test_get_data_non_ubuntu_will_not_remove_network_scripts(self): - """get_data on non-Ubuntu will not remove ubuntu net scripts.""" - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - - dsrc = self._get_ds(data, distro='debian') - dsrc.get_data() - self.m_remove_ubuntu_network_scripts.assert_not_called() - - def test_get_data_on_ubuntu_will_remove_network_scripts(self): - """get_data will remove ubuntu net scripts on Ubuntu distro.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data, distro='ubuntu') - dsrc.get_data() - self.m_remove_ubuntu_network_scripts.assert_called_once_with() - - def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self): - """When apply_network_config false, do not remove scripts on Ubuntu.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data, distro='ubuntu') - dsrc.get_data() - self.m_remove_ubuntu_network_scripts.assert_not_called() - - def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): - """Return all structured metadata and cache no class attributes.""" - yaml_cfg = "" - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserData': {'text': 'FOOBAR', 'encoding': 'plain'}, - 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - dsrc = self._get_ds(data) - expected_cfg = { - 'PreprovisionedVMType': None, - 'PreprovisionedVm': False, - 'datasource': {'Azure': {}}, - 'system_info': {'default_user': {'name': 'myuser'}}} - expected_metadata = { - 'azure_data': { - 'configurationsettype': 'LinuxProvisioningConfiguration'}, - 'imds': NETWORK_METADATA, - 'instance-id': EXAMPLE_UUID, - 'local-hostname': 'myhost', - 'random_seed': 'wild'} - - crawled_metadata = dsrc.crawl_metadata() - - self.assertCountEqual( - crawled_metadata.keys(), - ['cfg', 'files', 'metadata', 'userdata_raw']) - self.assertEqual(crawled_metadata['cfg'], expected_cfg) - self.assertEqual( - list(crawled_metadata['files'].keys()), ['ovf-env.xml']) - self.assertIn( - b'myhost', - crawled_metadata['files']['ovf-env.xml']) - self.assertEqual(crawled_metadata['metadata'], expected_metadata) - self.assertEqual(crawled_metadata['userdata_raw'], 'FOOBAR') - self.assertEqual(dsrc.userdata_raw, None) - self.assertEqual(dsrc.metadata, {}) - self.assertEqual(dsrc._metadata_imds, UNSET) - self.assertFalse(os.path.isfile( - os.path.join(self.waagent_d, 'ovf-env.xml'))) - - def test_crawl_metadata_raises_invalid_metadata_on_error(self): - """crawl_metadata raises an exception on invalid ovf-env.xml.""" - data = {'ovfcontent': "BOGUS", 'sys_cfg': {}} - dsrc = self._get_ds(data) - error_msg = ('BrokenAzureDataSource: Invalid ovf-env.xml:' - ' syntax error: line 1, column 0') - with self.assertRaises(InvalidMetaDataException) as cm: - dsrc.crawl_metadata() - self.assertEqual(str(cm.exception), error_msg) - - def test_crawl_metadata_call_imds_once_no_reprovision(self): - """If reprovisioning, report ready at the end""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "False"} - ) - - 
data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - dsrc.crawl_metadata() - self.assertEqual(1, self.m_get_metadata_from_imds.call_count) - - @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') - def test_crawl_metadata_call_imds_twice_with_reprovision( - self, poll_imds_func, m_report_ready, m_write, m_dhcp - ): - """If reprovisioning, imds metadata will be fetched twice""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "True"} - ) - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - poll_imds_func.return_value = ovfenv - dsrc.crawl_metadata() - self.assertEqual(2, self.m_get_metadata_from_imds.call_count) - - @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') - def test_crawl_metadata_on_reprovision_reports_ready( - self, poll_imds_func, m_report_ready, m_write, m_dhcp - ): - """If reprovisioning, report ready at the end""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "True"} - ) - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - poll_imds_func.return_value = ovfenv - dsrc.crawl_metadata() - self.assertEqual(1, m_report_ready.call_count) - - @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure.' - '_wait_for_all_nics_ready') - def test_crawl_metadata_waits_for_nic_on_savable_vms( - self, detect_nics, poll_imds_func, report_ready_func, m_write, m_dhcp - ): - """If reprovisioning, report ready at the end""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVMType": "Savable", - "PreprovisionedVm": "True"} - ) - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - poll_imds_func.return_value = ovfenv - dsrc.crawl_metadata() - self.assertEqual(1, report_ready_func.call_count) - self.assertEqual(1, detect_nics.call_count) - - @mock.patch( - 'cloudinit.sources.DataSourceAzure.EphemeralDHCPv4WithReporting') - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure.' 
- '_wait_for_all_nics_ready') - @mock.patch('os.path.isfile') - def test_detect_nics_when_marker_present( - self, is_file, detect_nics, poll_imds_func, report_ready_func, m_write, - m_dhcp): - """If reprovisioning, wait for nic attach if marker present""" - - def is_file_ret(key): - return key == dsaz.REPROVISION_NIC_ATTACH_MARKER_FILE - - is_file.side_effect = is_file_ret - ovfenv = construct_valid_ovf_env() - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - - dsrc = self._get_ds(data) - poll_imds_func.return_value = ovfenv - dsrc.crawl_metadata() - self.assertEqual(1, report_ready_func.call_count) - self.assertEqual(1, detect_nics.call_count) - - @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') - @mock.patch('cloudinit.sources.helpers.netlink.' - 'wait_for_media_disconnect_connect') - @mock.patch( - 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') - @mock.patch('cloudinit.sources.DataSourceAzure.readurl') - def test_crawl_metadata_on_reprovision_reports_ready_using_lease( - self, m_readurl, m_report_ready, - m_media_switch, m_write - ): - """If reprovisioning, report ready using the obtained lease""" - ovfenv = construct_valid_ovf_env( - platform_settings={"PreprovisionedVm": "True"} - ) - - data = { - 'ovfcontent': ovfenv, - 'sys_cfg': {} - } - dsrc = self._get_ds(data) - - with mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: - - # For this mock, net should not be up, - # so that cached ephemeral won't be used. - # This is so that a NEW ephemeral dhcp lease will be discovered - # and used instead. - m_dsrc_distro_networking_is_up.return_value = False - - lease = { - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'unknown-245': '624c3620'} - self.m_ephemeral_dhcpv4_with_reporting.return_value \ - .__enter__.return_value = lease - m_media_switch.return_value = None - - reprovision_ovfenv = construct_valid_ovf_env() - m_readurl.return_value = url_helper.StringResponse( - reprovision_ovfenv.encode('utf-8')) - - dsrc.crawl_metadata() - self.assertEqual(2, m_report_ready.call_count) - m_report_ready.assert_called_with(lease=lease) - - def test_waagent_d_has_0700_perms(self): - # we expect /var/lib/waagent to be created 0700 - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertTrue(os.path.isdir(self.waagent_d)) - self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_network_config_set_from_imds(self, m_driver): - """Datasource.network_config returns IMDS network data.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}}}, - 'version': 2} - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual(expected_network_config, dsrc.network_config) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_network_config_set_from_imds_route_metric_for_secondary_nic( - self, m_driver): - """Datasource.network_config adds route-metric to secondary nics.""" - sys_cfg = {'datasource': {'Azure': 
{'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}}, - 'eth1': {'set-name': 'eth1', - 'match': {'macaddress': '22:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 200}}, - 'eth2': {'set-name': 'eth2', - 'match': {'macaddress': '33:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 300}}}, - 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['network']['interface'].append(SECONDARY_INTERFACE) - third_intf = copy.deepcopy(SECONDARY_INTERFACE) - third_intf['macAddress'] = third_intf['macAddress'].replace('22', '33') - third_intf['ipv4']['subnet'][0]['address'] = '10.0.2.0' - third_intf['ipv4']['ipAddress'][0]['privateIpAddress'] = '10.0.2.6' - imds_data['network']['interface'].append(third_intf) - - self.m_get_metadata_from_imds.return_value = imds_data - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual(expected_network_config, dsrc.network_config) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - def test_network_config_set_from_imds_for_secondary_nic_no_ip( - self, m_driver): - """If an IP address is empty then there should no config for it.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - expected_network_config = { - 'ethernets': { - 'eth0': {'set-name': 'eth0', - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'dhcp6': False, - 'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}}}, - 'version': 2} - imds_data = copy.deepcopy(NETWORK_METADATA) - imds_data['network']['interface'].append(SECONDARY_INTERFACE_NO_IP) - self.m_get_metadata_from_imds.return_value = imds_data - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual(expected_network_config, dsrc.network_config) - - def test_availability_zone_set_from_imds(self): - """Datasource.availability returns IMDS platformFaultDomain.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual('0', dsrc.availability_zone) - - def test_region_set_from_imds(self): - """Datasource.region returns IMDS region location.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - dsrc = self._get_ds(data) - dsrc.get_data() - self.assertEqual('eastus2', dsrc.region) - - def test_sys_cfg_set_never_destroy_ntfs(self): - sys_cfg = {'datasource': {'Azure': { - 'never_destroy_ntfs': 'user-supplied-value'}}} - data = {'ovfcontent': construct_valid_ovf_env(data={}), - 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(dsrc.ds_cfg.get(dsaz.DS_CFG_KEY_PRESERVE_NTFS), - 'user-supplied-value') - - def test_username_used(self): - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - 
self.assertEqual(dsrc.cfg['system_info']['default_user']['name'],
-                         "myuser")
-
-    def test_password_given(self):
-        odata = {'HostName': "myhost", 'UserName': "myuser",
-                 'UserPassword': "mypass"}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
-        dsrc = self._get_ds(data)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertIn('default_user', dsrc.cfg['system_info'])
-        defuser = dsrc.cfg['system_info']['default_user']
-
-        # default user should be updated to the provided username
-        # and should not be locked.
-        self.assertEqual(defuser['name'], odata['UserName'])
-        self.assertFalse(defuser['lock_passwd'])
-        # passwd is a crypt-formatted string: $id$salt$encrypted
-        # encrypting the plaintext with the salt (everything up to the
-        # final '$') should equal the portion after the '$'
-        pos = defuser['passwd'].rfind("$") + 1
-        self.assertEqual(defuser['passwd'],
-                         crypt.crypt(odata['UserPassword'],
-                                     defuser['passwd'][0:pos]))
-
-        # the same hashed value should also be present in cfg['password']
-        self.assertEqual(defuser['passwd'], dsrc.cfg['password'])
-
-    def test_user_not_locked_if_password_redacted(self):
-        odata = {'HostName': "myhost", 'UserName': "myuser",
-                 'UserPassword': dsaz.DEF_PASSWD_REDACTION}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
-        dsrc = self._get_ds(data)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertIn('default_user', dsrc.cfg['system_info'])
-        defuser = dsrc.cfg['system_info']['default_user']
-
-        # default user should be updated to the provided username
-        # and should not be locked.
-        self.assertEqual(defuser['name'], odata['UserName'])
-        self.assertIn('lock_passwd', defuser)
-        self.assertFalse(defuser['lock_passwd'])
-
-    def test_userdata_plain(self):
-        mydata = "FOOBAR"
-        odata = {'UserData': {'text': mydata, 'encoding': 'plain'}}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
-        dsrc = self._get_ds(data)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
-
-    def test_userdata_found(self):
-        mydata = "FOOBAR"
-        odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
-        dsrc = self._get_ds(data)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
-
-    def test_default_ephemeral_configs_ephemeral_exists(self):
-        # make sure the ephemeral configs are correct if disk present
-        odata = {}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
-                'sys_cfg': {}}
-
-        orig_exists = dsaz.os.path.exists
-
-        def changed_exists(path):
-            return True if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
-                path)
-
-        with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
-            dsrc = self._get_ds(data)
-            ret = dsrc.get_data()
-            self.assertTrue(ret)
-            cfg = dsrc.get_config_obj()
-
-            self.assertEqual(dsrc.device_name_to_device("ephemeral0"),
-                             dsaz.RESOURCE_DISK_PATH)
-            assert 'disk_setup' in cfg
-            assert 'fs_setup' in cfg
-            self.assertIsInstance(cfg['disk_setup'], dict)
-            self.assertIsInstance(cfg['fs_setup'], list)
-
-    def test_default_ephemeral_configs_ephemeral_does_not_exist(self):
-        # make sure the ephemeral configs are correct if disk not present
-        odata = {}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
-                'sys_cfg': {}}
-
-        orig_exists = dsaz.os.path.exists
-
-        def changed_exists(path):
-            return False if path == dsaz.RESOURCE_DISK_PATH else orig_exists(
-                path)
-
-        with mock.patch(MOCKPATH + 'os.path.exists', new=changed_exists):
-            dsrc = self._get_ds(data)
- ret = dsrc.get_data() - self.assertTrue(ret) - cfg = dsrc.get_config_obj() - - assert 'disk_setup' not in cfg - assert 'fs_setup' not in cfg - - def test_provide_disk_aliases(self): - # Make sure that user can affect disk aliases - dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}} - odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': b64e(yaml.dump(dscfg)), - 'encoding': 'base64'}} - usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'}, - 'ephemeral0': False}} - userdata = '#cloud-config' + yaml.dump(usercfg) + "\n" - - ovfcontent = construct_valid_ovf_env(data=odata, userdata=userdata) - data = {'ovfcontent': ovfcontent, 'sys_cfg': {}} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - cfg = dsrc.get_config_obj() - self.assertTrue(cfg) - - def test_userdata_arrives(self): - userdata = "This is my user-data" - xml = construct_valid_ovf_env(data={}, userdata=userdata) - data = {'ovfcontent': xml} - dsrc = self._get_ds(data) - dsrc.get_data() - - self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw) - - def test_password_redacted_in_ovf(self): - odata = {'HostName': "myhost", 'UserName': "myuser", - 'UserPassword': "mypass"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - dsrc = self._get_ds(data) - ret = dsrc.get_data() - - self.assertTrue(ret) - ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') - - # The XML should not be same since the user password is redacted - on_disk_ovf = load_file(ovf_env_path) - self.xml_notequals(data['ovfcontent'], on_disk_ovf) - - # Make sure that the redacted password on disk is not used by CI - self.assertNotEqual(dsrc.cfg.get('password'), - dsaz.DEF_PASSWD_REDACTION) - - # Make sure that the password was really encrypted - et = ET.fromstring(on_disk_ovf) - for elem in et.iter(): - if 'UserPassword' in elem.tag: - self.assertEqual(dsaz.DEF_PASSWD_REDACTION, elem.text) - - def test_ovf_env_arrives_in_waagent_dir(self): - xml = construct_valid_ovf_env(data={}, userdata="FOODATA") - dsrc = self._get_ds({'ovfcontent': xml}) - dsrc.get_data() - - # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir) - # we expect that the ovf-env.xml file is copied there. 
- ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') - self.assertTrue(os.path.exists(ovf_env_path)) - self.xml_equals(xml, load_file(ovf_env_path)) - - def test_ovf_can_include_unicode(self): - xml = construct_valid_ovf_env(data={}) - xml = '\ufeff{0}'.format(xml) - dsrc = self._get_ds({'ovfcontent': xml}) - dsrc.get_data() - - def test_dsaz_report_ready_returns_true_when_report_succeeds( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - self.assertTrue(dsrc._report_ready(lease=mock.MagicMock())) - - def test_dsaz_report_ready_returns_false_and_does_not_propagate_exc( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - self.m_get_metadata_from_fabric.side_effect = Exception - self.assertFalse(dsrc._report_ready(lease=mock.MagicMock())) - - def test_dsaz_report_failure_returns_true_when_report_succeeds(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - self.assertTrue(dsrc._report_failure()) - self.assertEqual( - 1, - self.m_report_failure_to_fabric.call_count) - - def test_dsaz_report_failure_returns_false_and_does_not_propagate_exc( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ - as m_ephemeral_dhcp_ctx, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - # setup mocks to allow using cached ephemeral dhcp lease - m_dsrc_distro_networking_is_up.return_value = True - test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' - test_lease = {'unknown-245': test_lease_dhcp_option_245} - m_ephemeral_dhcp_ctx.lease = test_lease - - # We expect 3 calls to report_failure_to_fabric, - # because we try 3 different methods of calling report failure. - # The different methods are attempted in the following order: - # 1. Using cached ephemeral dhcp context to report failure to Azure - # 2. Using new ephemeral dhcp to report failure to Azure - # 3. 
Using fallback lease to report failure to Azure - self.m_report_failure_to_fabric.side_effect = Exception - self.assertFalse(dsrc._report_failure()) - self.assertEqual( - 3, - self.m_report_failure_to_fabric.call_count) - - def test_dsaz_report_failure_description_msg(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - test_msg = 'Test report failure description message' - self.assertTrue(dsrc._report_failure(description=test_msg)) - self.m_report_failure_to_fabric.assert_called_once_with( - dhcp_opts=mock.ANY, description=test_msg) - - def test_dsaz_report_failure_no_description_msg(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata: - m_crawl_metadata.side_effect = Exception - - self.assertTrue(dsrc._report_failure()) # no description msg - self.m_report_failure_to_fabric.assert_called_once_with( - dhcp_opts=mock.ANY, description=None) - - def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \ - as m_ephemeral_dhcp_ctx, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - # setup mocks to allow using cached ephemeral dhcp lease - m_dsrc_distro_networking_is_up.return_value = True - test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' - test_lease = {'unknown-245': test_lease_dhcp_option_245} - m_ephemeral_dhcp_ctx.lease = test_lease - - self.assertTrue(dsrc._report_failure()) - - # ensure called with cached ephemeral dhcp lease option 245 - self.m_report_failure_to_fabric.assert_called_once_with( - description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) - - # ensure cached ephemeral is cleaned - self.assertEqual( - 1, - m_ephemeral_dhcp_ctx.clean_network.call_count) - - def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as m_dsrc_distro_networking_is_up: - # mock crawl metadata failure to cause report failure - m_crawl_metadata.side_effect = Exception - - # net is not up and cannot use cached ephemeral dhcp - m_dsrc_distro_networking_is_up.return_value = False - # setup ephemeral dhcp lease discovery mock - test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245' - test_lease = {'unknown-245': test_lease_dhcp_option_245} - self.m_ephemeral_dhcpv4_with_reporting.return_value \ - .__enter__.return_value = test_lease - - self.assertTrue(dsrc._report_failure()) - - # ensure called with the newly discovered - # ephemeral dhcp lease option 245 - self.m_report_failure_to_fabric.assert_called_once_with( - description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245) - - def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease( - self): - dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - - with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \ - mock.patch.object(dsrc.distro.networking, 'is_up') \ - as 
-    def test_dsaz_report_failure_description_msg(self):
-        dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-
-        with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
-            # mock crawl metadata failure to cause report failure
-            m_crawl_metadata.side_effect = Exception
-
-            test_msg = 'Test report failure description message'
-            self.assertTrue(dsrc._report_failure(description=test_msg))
-            self.m_report_failure_to_fabric.assert_called_once_with(
-                dhcp_opts=mock.ANY, description=test_msg)
-
-    def test_dsaz_report_failure_no_description_msg(self):
-        dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-
-        with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata:
-            m_crawl_metadata.side_effect = Exception
-
-            self.assertTrue(dsrc._report_failure())  # no description msg
-            self.m_report_failure_to_fabric.assert_called_once_with(
-                dhcp_opts=mock.ANY, description=None)
-
-    def test_dsaz_report_failure_uses_cached_ephemeral_dhcp_ctx_lease(self):
-        dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-
-        with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
-                mock.patch.object(dsrc, '_ephemeral_dhcp_ctx') \
-                as m_ephemeral_dhcp_ctx, \
-                mock.patch.object(dsrc.distro.networking, 'is_up') \
-                as m_dsrc_distro_networking_is_up:
-            # mock crawl metadata failure to cause report failure
-            m_crawl_metadata.side_effect = Exception
-
-            # setup mocks to allow using cached ephemeral dhcp lease
-            m_dsrc_distro_networking_is_up.return_value = True
-            test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
-            test_lease = {'unknown-245': test_lease_dhcp_option_245}
-            m_ephemeral_dhcp_ctx.lease = test_lease
-
-            self.assertTrue(dsrc._report_failure())
-
-            # ensure called with cached ephemeral dhcp lease option 245
-            self.m_report_failure_to_fabric.assert_called_once_with(
-                description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
-
-            # ensure cached ephemeral is cleaned
-            self.assertEqual(
-                1,
-                m_ephemeral_dhcp_ctx.clean_network.call_count)
-
-    def test_dsaz_report_failure_no_net_uses_new_ephemeral_dhcp_lease(self):
-        dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-
-        with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
-                mock.patch.object(dsrc.distro.networking, 'is_up') \
-                as m_dsrc_distro_networking_is_up:
-            # mock crawl metadata failure to cause report failure
-            m_crawl_metadata.side_effect = Exception
-
-            # net is not up and cannot use cached ephemeral dhcp
-            m_dsrc_distro_networking_is_up.return_value = False
-            # setup ephemeral dhcp lease discovery mock
-            test_lease_dhcp_option_245 = 'test_lease_dhcp_option_245'
-            test_lease = {'unknown-245': test_lease_dhcp_option_245}
-            self.m_ephemeral_dhcpv4_with_reporting.return_value \
-                .__enter__.return_value = test_lease
-
-            self.assertTrue(dsrc._report_failure())
-
-            # ensure called with the newly discovered
-            # ephemeral dhcp lease option 245
-            self.m_report_failure_to_fabric.assert_called_once_with(
-                description=mock.ANY, dhcp_opts=test_lease_dhcp_option_245)
-
-    def test_dsaz_report_failure_no_net_and_no_dhcp_uses_fallback_lease(
-            self):
-        dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-
-        with mock.patch.object(dsrc, 'crawl_metadata') as m_crawl_metadata, \
-                mock.patch.object(dsrc.distro.networking, 'is_up') \
-                as m_dsrc_distro_networking_is_up:
-            # mock crawl metadata failure to cause report failure
-            m_crawl_metadata.side_effect = Exception
-
-            # net is not up and cannot use cached ephemeral dhcp
-            m_dsrc_distro_networking_is_up.return_value = False
-            # ephemeral dhcp discovery failure,
-            # so cannot use a new ephemeral dhcp
-            self.m_ephemeral_dhcpv4_with_reporting.return_value \
-                .__enter__.side_effect = Exception
-
-            self.assertTrue(dsrc._report_failure())
-
-            # ensure called with fallback lease
-            self.m_report_failure_to_fabric.assert_called_once_with(
-                description=mock.ANY,
-                fallback_lease_file=dsrc.dhclient_lease_file)
-
-    def test_exception_fetching_fabric_data_doesnt_propagate(self):
-        """Errors communicating with fabric should warn, but return True."""
-        dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-        self.m_get_metadata_from_fabric.side_effect = Exception
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-
-    def test_fabric_data_included_in_metadata(self):
-        dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-        self.m_get_metadata_from_fabric.return_value = {'test': 'value'}
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual('value', dsrc.metadata['test'])
-
-    def test_instance_id_case_insensitive(self):
-        """Return the previous iid when current is a case-insensitive match."""
-        lower_iid = EXAMPLE_UUID.lower()
-        upper_iid = EXAMPLE_UUID.upper()
-        # lowercase current UUID
-        ds = self._get_ds(
-            {'ovfcontent': construct_valid_ovf_env()}, instance_id=lower_iid
-        )
-        # UPPERCASE previous
-        write_file(
-            os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
-            upper_iid)
-        ds.get_data()
-        self.assertEqual(upper_iid, ds.metadata['instance-id'])
-
-        # UPPERCASE current UUID
-        ds = self._get_ds(
-            {'ovfcontent': construct_valid_ovf_env()}, instance_id=upper_iid
-        )
-        # lowercase previous
-        write_file(
-            os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
-            lower_iid)
-        ds.get_data()
-        self.assertEqual(lower_iid, ds.metadata['instance-id'])
-
-    def test_instance_id_endianness(self):
-        """Return the previous iid when dmi uuid is the byteswapped iid."""
-        ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-        # byte-swapped previous
-        write_file(
-            os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
-            '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
-        ds.get_data()
-        self.assertEqual(
-            '544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8', ds.metadata['instance-id'])
-        # not byte-swapped previous
-        write_file(
-            os.path.join(self.paths.cloud_dir, 'data', 'instance-id'),
-            '644CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8')
-        ds.get_data()
-        self.assertEqual(self.instance_id, ds.metadata['instance-id'])
-
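
test_instance_id_endianness covers the SMBIOS quirk where the first three UUID fields are stored little-endian, so the DMI-reported UUID can be a byte-swapped form of the cached instance-id. A stdlib-only sketch of the swap (the helper name is illustrative, not the datasource's API):

    import uuid

    def swap_uuid_endianness(uuid_str):
        # uuid.bytes_le reorders the first three fields; re-reading those
        # bytes big-endian turns '544cdfd0-cb4e-4b4a-...' into
        # 'd0df4c54-4ecb-4a4b-...' and back again.
        return str(uuid.UUID(bytes=uuid.UUID(uuid_str).bytes_le))
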
["seed_dir", - dsaz.DEFAULT_PROVISIONING_ISO_DEV, - "/dev/cd0", - "cache_dir"]) - self.assertEqual( - [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) - - @mock.patch('cloudinit.sources.DataSourceAzure.device_driver', - return_value=None) - @mock.patch('cloudinit.net.generate_fallback_config') - def test_imds_network_config(self, mock_fallback, m_driver): - """Network config is generated from IMDS network data when present.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - - expected_cfg = { - 'ethernets': { - 'eth0': {'dhcp4': True, - 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': False, - 'match': {'macaddress': '00:0d:3a:04:75:98'}, - 'set-name': 'eth0'}}, - 'version': 2} - - self.assertEqual(expected_cfg, dsrc.network_config) - mock_fallback.assert_not_called() - - @mock.patch('cloudinit.net.get_interface_mac') - @mock.patch('cloudinit.net.get_devicelist') - @mock.patch('cloudinit.net.device_driver') - @mock.patch('cloudinit.net.generate_fallback_config') - def test_imds_network_ignored_when_apply_network_config_false( - self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): - """When apply_network_config is False, use fallback instead of IMDS.""" - sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': sys_cfg} - fallback_config = { - 'version': 1, - 'config': [{ - 'type': 'physical', 'name': 'eth0', - 'mac_address': '00:11:22:33:44:55', - 'params': {'driver': 'hv_netsvc'}, - 'subnets': [{'type': 'dhcp'}], - }] - } - mock_fallback.return_value = fallback_config - - mock_devlist.return_value = ['eth0'] - mock_dd.return_value = ['hv_netsvc'] - mock_get_mac.return_value = '00:11:22:33:44:55' - - dsrc = self._get_ds(data) - self.assertTrue(dsrc.get_data()) - self.assertEqual(dsrc.network_config, fallback_config) - - @mock.patch('cloudinit.net.get_interface_mac') - @mock.patch('cloudinit.net.get_devicelist') - @mock.patch('cloudinit.net.device_driver') - @mock.patch('cloudinit.net.generate_fallback_config', autospec=True) - def test_fallback_network_config(self, mock_fallback, mock_dd, - mock_devlist, mock_get_mac): - """On absent IMDS network data, generate network fallback config.""" - odata = {'HostName': "myhost", 'UserName': "myuser"} - data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} - - fallback_config = { - 'version': 1, - 'config': [{ - 'type': 'physical', 'name': 'eth0', - 'mac_address': '00:11:22:33:44:55', - 'params': {'driver': 'hv_netsvc'}, - 'subnets': [{'type': 'dhcp'}], - }] - } - mock_fallback.return_value = fallback_config - - mock_devlist.return_value = ['eth0'] - mock_dd.return_value = ['hv_netsvc'] - mock_get_mac.return_value = '00:11:22:33:44:55' - - dsrc = self._get_ds(data) - # Represent empty response from network imds - self.m_get_metadata_from_imds.return_value = {} - ret = dsrc.get_data() - self.assertTrue(ret) - - netconfig = dsrc.network_config - self.assertEqual(netconfig, fallback_config) - mock_fallback.assert_called_with( - blacklist_drivers=['mlx4_core', 'mlx5_core'], - config_driver=True) - - @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True) - def test_blacklist_through_distro( - self, m_net_get_interfaces): - """Verify 
-    @mock.patch(MOCKPATH + 'net.get_interfaces', autospec=True)
-    def test_blacklist_through_distro(
-            self, m_net_get_interfaces):
-        """Verify Azure DS updates blacklist drivers in the distro's
-           networking object."""
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata),
-                'sys_cfg': {}}
-
-        distro_cls = distros.fetch('ubuntu')
-        distro = distro_cls('ubuntu', {}, self.paths)
-        dsrc = self._get_ds(data, distro=distro)
-        dsrc.get_data()
-        self.assertEqual(distro.networking.blacklist_drivers,
-                         dsaz.BLACKLIST_DRIVERS)
-
-        distro.networking.get_interfaces_by_mac()
-        m_net_get_interfaces.assert_called_with(
-            blacklist_drivers=dsaz.BLACKLIST_DRIVERS)
-
-    @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
-    def test_get_hostname_with_no_args(self, m_subp):
-        dsaz.get_hostname()
-        m_subp.assert_called_once_with(("hostname",), capture=True)
-
-    @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
-    def test_get_hostname_with_string_arg(self, m_subp):
-        dsaz.get_hostname(hostname_command="hostname")
-        m_subp.assert_called_once_with(("hostname",), capture=True)
-
-    @mock.patch(MOCKPATH + 'subp.subp', autospec=True)
-    def test_get_hostname_with_iterable_arg(self, m_subp):
-        dsaz.get_hostname(hostname_command=("hostname",))
-        m_subp.assert_called_once_with(("hostname",), capture=True)
-
-    @mock.patch(
-        'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
-    def test_get_public_ssh_keys_with_imds(self, m_parse_certificates):
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-        dsrc = self._get_ds(data)
-        dsrc.get_data()
-        dsrc.setup(True)
-        ssh_keys = dsrc.get_public_ssh_keys()
-        self.assertEqual(ssh_keys, ["ssh-rsa key1"])
-        self.assertEqual(m_parse_certificates.call_count, 0)
-
-    def test_key_without_crlf_valid(self):
-        test_key = 'ssh-rsa somerandomkeystuff some comment'
-        assert True is dsaz._key_is_openssh_formatted(test_key)
-
-    def test_key_with_crlf_invalid(self):
-        test_key = 'ssh-rsa someran\r\ndomkeystuff some comment'
-        assert False is dsaz._key_is_openssh_formatted(test_key)
-
-    def test_key_endswith_crlf_valid(self):
-        test_key = 'ssh-rsa somerandomkeystuff some comment\r\n'
-        assert True is dsaz._key_is_openssh_formatted(test_key)
-
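
The three key-format cases above reduce to a single property: CR/LF is only tolerated at the very end of a key, never inside the key material. A sketch of an equivalent predicate (not the datasource's actual implementation):

    def key_is_openssh_formatted(key):
        # Trailing line endings are harmless and stripped away; any
        # CR/LF left inside breaks the single-line OpenSSH format.
        stripped = key.strip()
        return '\r' not in stripped and '\n' not in stripped
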
-    @mock.patch(
-        'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates')
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
-    def test_get_public_ssh_keys_with_no_openssh_format(
-            self,
-            m_get_metadata_from_imds,
-            m_parse_certificates):
-        imds_data = copy.deepcopy(NETWORK_METADATA)
-        imds_data['compute']['publicKeys'][0]['keyData'] = 'no-openssh-format'
-        m_get_metadata_from_imds.return_value = imds_data
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-        dsrc = self._get_ds(data)
-        dsrc.get_data()
-        dsrc.setup(True)
-        ssh_keys = dsrc.get_public_ssh_keys()
-        self.assertEqual(ssh_keys, [])
-        self.assertEqual(m_parse_certificates.call_count, 0)
-
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
-    def test_get_public_ssh_keys_without_imds(
-            self,
-            m_get_metadata_from_imds):
-        m_get_metadata_from_imds.return_value = dict()
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-        dsrc = self._get_ds(data)
-        dsaz.get_metadata_from_fabric.return_value = {'public-keys': ['key2']}
-        dsrc.get_data()
-        dsrc.setup(True)
-        ssh_keys = dsrc.get_public_ssh_keys()
-        self.assertEqual(ssh_keys, ['key2'])
-
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
-    def test_imds_api_version_wanted_nonexistent(
-            self,
-            m_get_metadata_from_imds):
-        def get_metadata_from_imds_side_eff(*args, **kwargs):
-            if kwargs['api_version'] == dsaz.IMDS_VER_WANT:
-                raise url_helper.UrlError("No IMDS version", code=400)
-            return NETWORK_METADATA
-        m_get_metadata_from_imds.side_effect = get_metadata_from_imds_side_eff
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-        dsrc = self._get_ds(data)
-        dsrc.get_data()
-        self.assertIsNotNone(dsrc.metadata)
-        self.assertTrue(dsrc.failed_desired_api_version)
-
-    @mock.patch(
-        MOCKPATH + 'get_metadata_from_imds', return_value=NETWORK_METADATA)
-    def test_imds_api_version_wanted_exists(self, m_get_metadata_from_imds):
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-        dsrc = self._get_ds(data)
-        dsrc.get_data()
-        self.assertIsNotNone(dsrc.metadata)
-        self.assertFalse(dsrc.failed_desired_api_version)
-
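
The two api-version tests above describe a try-then-fall-back fetch. A rough sketch of that behavior, where fetch stands in for the IMDS request and the returned boolean mirrors failed_desired_api_version:

    def get_imds_md_with_fallback(fetch, desired_version, minimum_version):
        # Prefer the desired IMDS api-version; on any error retry with
        # the older, always-supported version and record the fallback.
        try:
            return fetch(api_version=desired_version), False
        except Exception:
            return fetch(api_version=minimum_version), True
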
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
-    def test_hostname_from_imds(self, m_get_metadata_from_imds):
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-        imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
-        imds_data_with_os_profile["compute"]["osProfile"] = dict(
-            adminUsername="username1",
-            computerName="hostname1",
-            disablePasswordAuthentication="true"
-        )
-        m_get_metadata_from_imds.return_value = imds_data_with_os_profile
-        dsrc = self._get_ds(data)
-        dsrc.get_data()
-        self.assertEqual(dsrc.metadata["local-hostname"], "hostname1")
-
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
-    def test_username_from_imds(self, m_get_metadata_from_imds):
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-        imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
-        imds_data_with_os_profile["compute"]["osProfile"] = dict(
-            adminUsername="username1",
-            computerName="hostname1",
-            disablePasswordAuthentication="true"
-        )
-        m_get_metadata_from_imds.return_value = imds_data_with_os_profile
-        dsrc = self._get_ds(data)
-        dsrc.get_data()
-        self.assertEqual(
-            dsrc.cfg["system_info"]["default_user"]["name"],
-            "username1"
-        )
-
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
-    def test_disable_password_from_imds(self, m_get_metadata_from_imds):
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-        imds_data_with_os_profile = copy.deepcopy(NETWORK_METADATA)
-        imds_data_with_os_profile["compute"]["osProfile"] = dict(
-            adminUsername="username1",
-            computerName="hostname1",
-            disablePasswordAuthentication="true"
-        )
-        m_get_metadata_from_imds.return_value = imds_data_with_os_profile
-        dsrc = self._get_ds(data)
-        dsrc.get_data()
-        self.assertTrue(dsrc.metadata["disable_password"])
-
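
The hostname, username, and disable-password tests above all read from the same IMDS compute.osProfile block. A sketch of that extraction with a hypothetical helper name; note that disablePasswordAuthentication arrives as the string "true", not a boolean:

    def parse_os_profile(imds_md):
        profile = imds_md.get('compute', {}).get('osProfile', {})
        return {
            'local-hostname': profile.get('computerName'),
            'default_user': profile.get('adminUsername'),
            'disable_password':
                profile.get('disablePasswordAuthentication') == 'true',
        }
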
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
-    def test_userdata_from_imds(self, m_get_metadata_from_imds):
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        odata = {'HostName': "myhost", 'UserName': "myuser"}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-        userdata = "userdataImds"
-        imds_data = copy.deepcopy(NETWORK_METADATA)
-        imds_data["compute"]["osProfile"] = dict(
-            adminUsername="username1",
-            computerName="hostname1",
-            disablePasswordAuthentication="true",
-        )
-        imds_data["compute"]["userData"] = b64e(userdata)
-        m_get_metadata_from_imds.return_value = imds_data
-        dsrc = self._get_ds(data)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(dsrc.userdata_raw, userdata.encode('utf-8'))
-
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
-    def test_userdata_from_imds_with_customdata_from_OVF(
-            self, m_get_metadata_from_imds):
-        userdataOVF = "userdataOVF"
-        odata = {
-            'HostName': "myhost", 'UserName': "myuser",
-            'UserData': {'text': b64e(userdataOVF), 'encoding': 'base64'}
-        }
-        sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}}
-        data = {
-            'ovfcontent': construct_valid_ovf_env(data=odata),
-            'sys_cfg': sys_cfg
-        }
-
-        userdataImds = "userdataImds"
-        imds_data = copy.deepcopy(NETWORK_METADATA)
-        imds_data["compute"]["osProfile"] = dict(
-            adminUsername="username1",
-            computerName="hostname1",
-            disablePasswordAuthentication="true",
-        )
-        imds_data["compute"]["userData"] = b64e(userdataImds)
-        m_get_metadata_from_imds.return_value = imds_data
-        dsrc = self._get_ds(data)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8'))
-
-
-class TestAzureBounce(CiTestCase):
-
-    with_logs = True
-
-    def mock_out_azure_moving_parts(self):
-
-        def _load_possible_azure_ds(seed_dir, cache_dir):
-            yield seed_dir
-            yield dsaz.DEFAULT_PROVISIONING_ISO_DEV
-            if cache_dir:
-                yield cache_dir
-
-        self.patches.enter_context(
-            mock.patch.object(dsaz.util, 'wait_for_files'))
-        self.patches.enter_context(
-            mock.patch.object(
-                dsaz, 'list_possible_azure_ds',
-                mock.MagicMock(side_effect=_load_possible_azure_ds)))
-        self.patches.enter_context(
-            mock.patch.object(dsaz, 'get_metadata_from_fabric',
-                              mock.MagicMock(return_value={})))
-        self.patches.enter_context(
-            mock.patch.object(dsaz, 'get_metadata_from_imds',
-                              mock.MagicMock(return_value={})))
-        self.patches.enter_context(
-            mock.patch.object(dsaz.subp, 'which', lambda x: True))
-        self.patches.enter_context(mock.patch.object(
-            dsaz, '_get_random_seed', return_value='wild'))
-
-        def _dmi_mocks(key):
-            if key == 'system-uuid':
-                return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8'
-            elif key == 'chassis-asset-tag':
-                return '7783-7084-3265-9085-8269-3286-77'
-            raise RuntimeError('should not get here')
-
-        self.patches.enter_context(
-            mock.patch.object(dsaz.dmi, 'read_dmi_data',
-                              mock.MagicMock(side_effect=_dmi_mocks)))
-
-    def setUp(self):
-        super(TestAzureBounce, self).setUp()
-        self.tmp = self.tmp_dir()
-        self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
-        self.paths = helpers.Paths(
-            {'cloud_dir': self.tmp, 'run_dir': self.tmp})
-        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-        self.patches = ExitStack()
-        self.mock_out_azure_moving_parts()
-        self.get_hostname = self.patches.enter_context(
-            mock.patch.object(dsaz, 'get_hostname'))
-        self.set_hostname = self.patches.enter_context(
-            mock.patch.object(dsaz, 'set_hostname'))
-        self.subp = self.patches.enter_context(
-            mock.patch(MOCKPATH + 'subp.subp'))
-        self.find_fallback_nic = self.patches.enter_context(
-            mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9'))
-
-    def tearDown(self):
-        self.patches.close()
-        super(TestAzureBounce, self).tearDown()
-
-    def _get_ds(self, ovfcontent=None):
-        if ovfcontent is not None:
-            populate_dir(os.path.join(self.paths.seed_dir, "azure"),
-                         {'ovf-env.xml': ovfcontent})
-        dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        return dsrc
-
-    def _get_and_setup(self, dsrc):
-        ret = dsrc.get_data()
-        if ret:
-            dsrc.setup(True)
-        return ret
-
-    def get_ovf_env_with_dscfg(self, hostname, cfg):
-        odata = {
-            'HostName': hostname,
-            'dscfg': {
-                'text': b64e(yaml.dump(cfg)),
-                'encoding': 'base64'
-            }
-        }
-        return construct_valid_ovf_env(data=odata)
-
-    def test_disabled_bounce_does_not_change_hostname(self):
-        cfg = {'hostname_bounce': {'policy': 'off'}}
-        ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg))
-        ds.get_data()
-        self.assertEqual(0, self.set_hostname.call_count)
-
-    @mock.patch(MOCKPATH + 'perform_hostname_bounce')
-    def test_disabled_bounce_does_not_perform_bounce(
-            self, perform_hostname_bounce):
-        cfg = {'hostname_bounce': {'policy': 'off'}}
-        ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg))
-        ds.get_data()
-        self.assertEqual(0, perform_hostname_bounce.call_count)
-
-    def test_same_hostname_does_not_change_hostname(self):
-        host_name = 'unchanged-host-name'
-        self.get_hostname.return_value = host_name
-        cfg = {'hostname_bounce': {'policy': 'yes'}}
-        ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg))
-        ds.get_data()
-        self.assertEqual(0, self.set_hostname.call_count)
-
-    @mock.patch(MOCKPATH + 'perform_hostname_bounce')
-    def test_unchanged_hostname_does_not_perform_bounce(
-            self, perform_hostname_bounce):
-        host_name = 'unchanged-host-name'
-        self.get_hostname.return_value = host_name
-        cfg = {'hostname_bounce': {'policy': 'yes'}}
-        ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg))
-        ds.get_data()
-        self.assertEqual(0, perform_hostname_bounce.call_count)
-
-    @mock.patch(MOCKPATH + 'perform_hostname_bounce')
-    def test_force_performs_bounce_regardless(self, perform_hostname_bounce):
-        host_name = 'unchanged-host-name'
-        self.get_hostname.return_value = host_name
-        cfg = {'hostname_bounce': {'policy': 'force'}}
-        dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg))
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(1, perform_hostname_bounce.call_count)
-
-    def test_bounce_skipped_on_ifupdown_absent(self):
-        host_name = 'unchanged-host-name'
-        self.get_hostname.return_value = host_name
-        cfg = {'hostname_bounce': {'policy': 'force'}}
-        dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg))
-        patch_path = MOCKPATH + 'subp.which'
-        with mock.patch(patch_path) as m_which:
-            m_which.return_value = None
-            ret = self._get_and_setup(dsrc)
-        self.assertEqual([mock.call('ifup')], m_which.call_args_list)
-        self.assertTrue(ret)
-        self.assertIn(
-            "Skipping network bounce: ifupdown utils aren't present.",
-            self.logs.getvalue())
-
-    def test_different_hostnames_sets_hostname(self):
-        expected_hostname = 'azure-expected-host-name'
-        self.get_hostname.return_value = 'default-host-name'
-        dsrc = self._get_ds(
-            self.get_ovf_env_with_dscfg(expected_hostname, {}))
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(expected_hostname,
-                         self.set_hostname.call_args_list[0][0][0])
-
-    @mock.patch(MOCKPATH + 'perform_hostname_bounce')
-    def test_different_hostnames_performs_bounce(
-            self, perform_hostname_bounce):
-        expected_hostname = 'azure-expected-host-name'
-        self.get_hostname.return_value = 'default-host-name'
-        dsrc = self._get_ds(
-            self.get_ovf_env_with_dscfg(expected_hostname, {}))
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(1, perform_hostname_bounce.call_count)
-
-    def test_different_hostnames_sets_hostname_back(self):
-        initial_host_name = 'default-host-name'
-        self.get_hostname.return_value = initial_host_name
-        dsrc = self._get_ds(
-            self.get_ovf_env_with_dscfg('some-host-name', {}))
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(initial_host_name,
-                         self.set_hostname.call_args_list[-1][0][0])
-
-    @mock.patch(MOCKPATH + 'perform_hostname_bounce')
-    def test_failure_in_bounce_still_resets_host_name(
-            self, perform_hostname_bounce):
-        perform_hostname_bounce.side_effect = Exception
-        initial_host_name = 'default-host-name'
-        self.get_hostname.return_value = initial_host_name
-        dsrc = self._get_ds(
-            self.get_ovf_env_with_dscfg('some-host-name', {}))
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(initial_host_name,
-                         self.set_hostname.call_args_list[-1][0][0])
-
-    @mock.patch.object(dsaz, 'get_boot_telemetry')
-    def test_environment_correct_for_bounce_command(
-            self, mock_get_boot_telemetry):
-        interface = 'int0'
-        hostname = 'my-new-host'
-        old_hostname = 'my-old-host'
-        self.get_hostname.return_value = old_hostname
-        cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}}
-        data = self.get_ovf_env_with_dscfg(hostname, cfg)
-        dsrc = self._get_ds(data)
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(1, self.subp.call_count)
-        bounce_env = self.subp.call_args[1]['env']
-        self.assertEqual(interface, bounce_env['interface'])
-        self.assertEqual(hostname, bounce_env['hostname'])
-        self.assertEqual(old_hostname, bounce_env['old_hostname'])
-
-    @mock.patch.object(dsaz, 'get_boot_telemetry')
-    def test_default_bounce_command_ifup_used_by_default(
-            self, mock_get_boot_telemetry):
-        cfg = {'hostname_bounce': {'policy': 'force'}}
-        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
-        dsrc = self._get_ds(data)
-        ret = self._get_and_setup(dsrc)
-        self.assertTrue(ret)
-        self.assertEqual(1, self.subp.call_count)
-        bounce_args = self.subp.call_args[1]['args']
-        self.assertEqual(
-            dsaz.BOUNCE_COMMAND_IFUP, bounce_args)
-
-    @mock.patch(MOCKPATH + 'perform_hostname_bounce')
-    def test_set_hostname_option_can_disable_bounce(
-            self, perform_hostname_bounce):
-        cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
-        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
-        self._get_ds(data).get_data()
-
-        self.assertEqual(0, perform_hostname_bounce.call_count)
-
-    def test_set_hostname_option_can_disable_hostname_set(self):
-        cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
-        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
-        self._get_ds(data).get_data()
-
-        self.assertEqual(0, self.set_hostname.call_count)
-
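
Taken together, the TestAzureBounce cases encode a small policy matrix, with set_hostname: False short-circuiting everything. A sketch of the bounce decision under those assumptions (the function name is illustrative):

    def should_bounce(policy, old_hostname, new_hostname):
        # 'off' never bounces, 'force' always does, and any other policy
        # bounces only when the hostname actually changes.
        if policy == 'off':
            return False
        if policy == 'force':
            return True
        return old_hostname != new_hostname
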
-    @mock.patch(MOCKPATH + 'perform_hostname_bounce')
-    def test_set_hostname_failed_disable_bounce(
-            self, perform_hostname_bounce):
-        cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 'force'}}
-        self.get_hostname.return_value = "old-hostname"
-        self.set_hostname.side_effect = Exception
-        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
-        self._get_ds(data).get_data()
-
-        self.assertEqual(0, perform_hostname_bounce.call_count)
-
-
-class TestLoadAzureDsDir(CiTestCase):
-    """Tests for load_azure_ds_dir."""
-
-    def setUp(self):
-        self.source_dir = self.tmp_dir()
-        super(TestLoadAzureDsDir, self).setUp()
-
-    def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self):
-        """load_azure_ds_dir raises an error when ovf-env.xml doesn't exist."""
-        with self.assertRaises(dsaz.NonAzureDataSource) as context_manager:
-            dsaz.load_azure_ds_dir(self.source_dir)
-        self.assertEqual(
-            'No ovf-env file found',
-            str(context_manager.exception))
-
-    def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self):
-        """load_azure_ds_dir calls read_azure_ovf to parse the xml."""
-        ovf_path = os.path.join(self.source_dir, 'ovf-env.xml')
-        with open(ovf_path, 'wb') as stream:
-            stream.write(b'invalid xml')
-        with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager:
-            dsaz.load_azure_ds_dir(self.source_dir)
-        self.assertEqual(
-            'Invalid ovf-env.xml: syntax error: line 1, column 0',
-            str(context_manager.exception))
-
-
-class TestReadAzureOvf(CiTestCase):
-
-    def test_invalid_xml_raises_non_azure_ds(self):
-        invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
-        self.assertRaises(dsaz.BrokenAzureDataSource,
-                          dsaz.read_azure_ovf, invalid_xml)
-
-    def test_load_with_pubkeys(self):
-        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
-        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
-        content = construct_valid_ovf_env(pubkeys=pubkeys)
-        (_md, _ud, cfg) = dsaz.read_azure_ovf(content)
-        for mypk in mypklist:
-            self.assertIn(mypk, cfg['_pubkeys'])
-
-
-class TestCanDevBeReformatted(CiTestCase):
-    warning_file = 'dataloss_warning_readme.txt'
-
-    def _domock(self, mockpath, sattr=None):
-        patcher = mock.patch(mockpath)
-        setattr(self, sattr, patcher.start())
-        self.addCleanup(patcher.stop)
-
-    def patchup(self, devs):
-        bypath = {}
-        for path, data in devs.items():
-            bypath[path] = data
-            if 'realpath' in data:
-                bypath[data['realpath']] = data
-            for ppath, pdata in data.get('partitions', {}).items():
-                bypath[ppath] = pdata
-                if 'realpath' in data:
-                    bypath[pdata['realpath']] = pdata
-
-        def realpath(d):
-            return bypath[d].get('realpath', d)
-
-        def partitions_on_device(devpath):
-            parts = bypath.get(devpath, {}).get('partitions', {})
-            ret = []
-            for path, data in parts.items():
-                ret.append((data.get('num'), realpath(path)))
-            # return sorted by partition number
-            return sorted(ret, key=lambda d: d[0])
-
-        def mount_cb(device, callback, mtype, update_env_for_mount):
-            self.assertEqual('ntfs', mtype)
-            self.assertEqual('C', update_env_for_mount.get('LANG'))
-            p = self.tmp_dir()
-            for f in bypath.get(device).get('files', []):
-                write_file(os.path.join(p, f), content=f)
-            return callback(p)
-
-        def has_ntfs_fs(device):
-            return bypath.get(device, {}).get('fs') == 'ntfs'
-
-        p = MOCKPATH
-        self._domock(p + "_partitions_on_device", 'm_partitions_on_device')
-        self._domock(p + "_has_ntfs_filesystem", 'm_has_ntfs_filesystem')
-        self._domock(p + "util.mount_cb", 'm_mount_cb')
-        self._domock(p + "os.path.realpath", 'm_realpath')
-        self._domock(p + "os.path.exists", 'm_exists')
-        self._domock(p + "util.SeLinuxGuard", 'm_selguard')
-
-        self.m_exists.side_effect = lambda p: p in bypath
-        self.m_realpath.side_effect = realpath
-        self.m_has_ntfs_filesystem.side_effect = has_ntfs_fs
-        self.m_mount_cb.side_effect = mount_cb
-        self.m_partitions_on_device.side_effect = partitions_on_device
-        self.m_selguard.__enter__ = mock.Mock(return_value=False)
-        self.m_selguard.__exit__ = mock.Mock()
-
-    def test_three_partitions_is_false(self):
-        """A disk with 3 partitions can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1},
-                    '/dev/sda2': {'num': 2},
-                    '/dev/sda3': {'num': 3},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("3 or more", msg.lower())
-
-    def test_no_partitions_is_false(self):
-        """A disk with no partitions can not be formatted."""
-        self.patchup({'/dev/sda': {}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("not partitioned", msg.lower())
-
-    def test_two_partitions_not_ntfs_false(self):
-        """2 partitions and 2nd not ntfs can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1},
-                    '/dev/sda2': {'num': 2, 'fs': 'ext4', 'files': []},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("not ntfs", msg.lower())
-
-    def test_two_partitions_ntfs_populated_false(self):
-        """2 partitions and populated ntfs fs on 2nd can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1},
-                    '/dev/sda2': {'num': 2, 'fs': 'ntfs',
-                                  'files': ['secret.txt']},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("files on it", msg.lower())
-
-    def test_two_partitions_ntfs_empty_is_true(self):
-        """2 partitions and empty ntfs fs on 2nd can be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1},
-                    '/dev/sda2': {'num': 2, 'fs': 'ntfs', 'files': []},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertTrue(value)
-        self.assertIn("safe for", msg.lower())
-
-    def test_one_partition_not_ntfs_false(self):
-        """1 partition with fs other than ntfs can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1, 'fs': 'zfs'},
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertFalse(value)
-        self.assertIn("not ntfs", msg.lower())
-
-    def test_one_partition_ntfs_populated_false(self):
-        """1 mountable ntfs partition with many files can not be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1, 'fs': 'ntfs',
-                                  'files': ['file1.txt', 'file2.exe']},
-                }}})
-        with mock.patch.object(dsaz.LOG, 'warning') as warning:
-            value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                     preserve_ntfs=False)
-            wmsg = warning.call_args[0][0]
-            self.assertIn("looks like you're using NTFS on the ephemeral disk",
-                          wmsg)
-            self.assertFalse(value)
-            self.assertIn("files on it", msg.lower())
-
-    def test_one_partition_ntfs_empty_is_true(self):
-        """1 mountable ntfs partition and no files can be formatted."""
-        self.patchup({
-            '/dev/sda': {
-                'partitions': {
-                    '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []}
-                }}})
-        value, msg = dsaz.can_dev_be_reformatted("/dev/sda",
-                                                 preserve_ntfs=False)
-        self.assertTrue(value)
-        self.assertIn("safe for", msg.lower())
-
formatted.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', - 'files': ['dataloss_warning_readme.txt']} - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=False) - self.assertTrue(value) - self.assertIn("safe for", msg.lower()) - - def test_one_partition_through_realpath_is_true(self): - """A symlink to a device with 1 ntfs partition can be formatted.""" - epath = '/dev/disk/cloud/azure_resource' - self.patchup({ - epath: { - 'realpath': '/dev/sdb', - 'partitions': { - epath + '-part1': { - 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], - 'realpath': '/dev/sdb1'} - }}}) - value, msg = dsaz.can_dev_be_reformatted(epath, - preserve_ntfs=False) - self.assertTrue(value) - self.assertIn("safe for", msg.lower()) - - def test_three_partition_through_realpath_is_false(self): - """A symlink to a device with 3 partitions can not be formatted.""" - epath = '/dev/disk/cloud/azure_resource' - self.patchup({ - epath: { - 'realpath': '/dev/sdb', - 'partitions': { - epath + '-part1': { - 'num': 1, 'fs': 'ntfs', 'files': [self.warning_file], - 'realpath': '/dev/sdb1'}, - epath + '-part2': {'num': 2, 'fs': 'ext3', - 'realpath': '/dev/sdb2'}, - epath + '-part3': {'num': 3, 'fs': 'ext', - 'realpath': '/dev/sdb3'} - }}}) - value, msg = dsaz.can_dev_be_reformatted(epath, - preserve_ntfs=False) - self.assertFalse(value) - self.assertIn("3 or more", msg.lower()) - - def test_ntfs_mount_errors_true(self): - """can_dev_be_reformatted does not fail if NTFS is unknown fstype.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} - }}}) - - error_msgs = [ - "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL - "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES - ] - - for err_msg in error_msgs: - self.m_mount_cb.side_effect = MountFailedError( - "Failed mounting %s to %s due to: \nUnexpected.\n%s" % - ('/dev/sda', '/fake-tmp/dir', err_msg)) - - value, msg = dsaz.can_dev_be_reformatted('/dev/sda', - preserve_ntfs=False) - self.assertTrue(value) - self.assertIn('cannot mount NTFS, assuming', msg) - - def test_never_destroy_ntfs_config_false(self): - """Normally formattable situation with never_destroy_ntfs set.""" - self.patchup({ - '/dev/sda': { - 'partitions': { - '/dev/sda1': {'num': 1, 'fs': 'ntfs', - 'files': ['dataloss_warning_readme.txt']} - }}}) - value, msg = dsaz.can_dev_be_reformatted("/dev/sda", - preserve_ntfs=True) - self.assertFalse(value) - self.assertIn("config says to never destroy NTFS " - "(datasource.Azure.never_destroy_ntfs)", msg) - - -class TestClearCachedData(CiTestCase): - - def test_clear_cached_attrs_clears_imds(self): - """All class attributes are reset to defaults, including imds data.""" - tmp = self.tmp_dir() - paths = helpers.Paths( - {'cloud_dir': tmp, 'run_dir': tmp}) - dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths) - clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds] - dsrc.metadata = 'md' - dsrc.userdata = 'ud' - dsrc._metadata_imds = 'imds' - dsrc._dirty_cache = True - dsrc.clear_cached_attrs() - self.assertEqual( - [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds], - clean_values) - - -class TestAzureNetExists(CiTestCase): - - def test_azure_net_must_exist_for_legacy_objpkl(self): - """DataSourceAzureNet must exist for old obj.pkl files - that reference it.""" - self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) - - -class TestPreprovisioningReadAzureOvfFlag(CiTestCase): - - def 
-class TestClearCachedData(CiTestCase):
-
-    def test_clear_cached_attrs_clears_imds(self):
-        """All class attributes are reset to defaults, including imds data."""
-        tmp = self.tmp_dir()
-        paths = helpers.Paths(
-            {'cloud_dir': tmp, 'run_dir': tmp})
-        dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=paths)
-        clean_values = [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds]
-        dsrc.metadata = 'md'
-        dsrc.userdata = 'ud'
-        dsrc._metadata_imds = 'imds'
-        dsrc._dirty_cache = True
-        dsrc.clear_cached_attrs()
-        self.assertEqual(
-            [dsrc.metadata, dsrc.userdata, dsrc._metadata_imds],
-            clean_values)
-
-
-class TestAzureNetExists(CiTestCase):
-
-    def test_azure_net_must_exist_for_legacy_objpkl(self):
-        """DataSourceAzureNet must exist for old obj.pkl files
-           that reference it."""
-        self.assertTrue(hasattr(dsaz, "DataSourceAzureNet"))
-
-
-class TestPreprovisioningReadAzureOvfFlag(CiTestCase):
-
-    def test_read_azure_ovf_with_true_flag(self):
-        """The read_azure_ovf method should set the PreprovisionedVM
-           cfg flag if the proper setting is present."""
-        content = construct_valid_ovf_env(
-            platform_settings={"PreprovisionedVm": "True"})
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertTrue(cfg['PreprovisionedVm'])
-
-    def test_read_azure_ovf_with_false_flag(self):
-        """The read_azure_ovf method should set the PreprovisionedVM
-           cfg flag to false if the proper setting is false."""
-        content = construct_valid_ovf_env(
-            platform_settings={"PreprovisionedVm": "False"})
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertFalse(cfg['PreprovisionedVm'])
-
-    def test_read_azure_ovf_without_flag(self):
-        """The read_azure_ovf method should not set the
-           PreprovisionedVM cfg flag."""
-        content = construct_valid_ovf_env()
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertFalse(cfg['PreprovisionedVm'])
-        self.assertEqual(None, cfg["PreprovisionedVMType"])
-
-    def test_read_azure_ovf_with_running_type(self):
-        """The read_azure_ovf method should set PreprovisionedVMType
-           cfg flag to Running."""
-        content = construct_valid_ovf_env(
-            platform_settings={"PreprovisionedVMType": "Running",
-                               "PreprovisionedVm": "True"})
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertTrue(cfg['PreprovisionedVm'])
-        self.assertEqual("Running", cfg['PreprovisionedVMType'])
-
-    def test_read_azure_ovf_with_savable_type(self):
-        """The read_azure_ovf method should set PreprovisionedVMType
-           cfg flag to Savable."""
-        content = construct_valid_ovf_env(
-            platform_settings={"PreprovisionedVMType": "Savable",
-                               "PreprovisionedVm": "True"})
-        ret = dsaz.read_azure_ovf(content)
-        cfg = ret[2]
-        self.assertTrue(cfg['PreprovisionedVm'])
-        self.assertEqual("Savable", cfg['PreprovisionedVMType'])
-
-
-@mock.patch('os.path.isfile')
-class TestPreprovisioningShouldReprovision(CiTestCase):
-
-    def setUp(self):
-        super(TestPreprovisioningShouldReprovision, self).setUp()
-        tmp = self.tmp_dir()
-        self.waagent_d = self.tmp_path('/var/lib/waagent', tmp)
-        self.paths = helpers.Paths({'cloud_dir': tmp})
-        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
-    @mock.patch(MOCKPATH + 'util.write_file')
-    def test__should_reprovision_with_true_cfg(self, isfile, write_f):
-        """The _should_reprovision method should return True with config
-           flag present."""
-        isfile.return_value = False
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        self.assertTrue(dsa._should_reprovision(
-            (None, None, {'PreprovisionedVm': True}, None)))
-
-    def test__should_reprovision_with_file_existing(self, isfile):
-        """The _should_reprovision method should return True if the sentinel
-           exists."""
-        isfile.return_value = True
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        self.assertTrue(dsa._should_reprovision(
-            (None, None, {'preprovisionedvm': False}, None)))
-
-    def test__should_reprovision_returns_false(self, isfile):
-        """The _should_reprovision method should return False
-           if config and sentinel are not present."""
-        isfile.return_value = False
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        self.assertFalse(dsa._should_reprovision((None, None, {}, None)))
-
-    @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
-    def test__should_reprovision_uses_imds_md(self, write_file, isfile):
-        """The _should_reprovision method should be able to
-           retrieve the preprovisioning VM type from imds metadata"""
-        isfile.return_value = False
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        self.assertTrue(dsa._should_reprovision(
-            (None, None, {}, None),
-            {'extended': {'compute': {'ppsType': 'Running'}}}))
-        self.assertFalse(dsa._should_reprovision(
-            (None, None, {}, None),
-            {}))
-        self.assertFalse(dsa._should_reprovision(
-            (None, None, {}, None),
-            {'extended': {'compute': {"hasCustomData": False}}}))
-
-    @mock.patch(MOCKPATH + 'DataSourceAzure._poll_imds')
-    def test_reprovision_calls__poll_imds(self, _poll_imds, isfile):
-        """_reprovision will poll IMDS."""
-        isfile.return_value = False
-        hostname = "myhost"
-        username = "myuser"
-        odata = {'HostName': hostname, 'UserName': username}
-        _poll_imds.return_value = construct_valid_ovf_env(data=odata)
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        dsa._reprovision()
-        _poll_imds.assert_called_with()
-
-
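
The _should_reprovision cases above check three independent triggers. A condensed sketch of that decision, with cfg, sentinel_exists, and imds_md as stand-in inputs rather than the method's real signature:

    def should_reprovision(cfg, sentinel_exists, imds_md=None):
        # Reprovision when the OVF flag, the on-disk sentinel, or the
        # IMDS extended metadata's ppsType indicates a preprovisioned VM.
        pps_type = (imds_md or {}).get(
            'extended', {}).get('compute', {}).get('ppsType')
        return bool(cfg.get('PreprovisionedVm') or sentinel_exists or pps_type)
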
-class TestPreprovisioningHotAttachNics(CiTestCase):
-
-    def setUp(self):
-        super(TestPreprovisioningHotAttachNics, self).setUp()
-        self.tmp = self.tmp_dir()
-        self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
-        self.paths = helpers.Paths({'cloud_dir': self.tmp})
-        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-        self.paths = helpers.Paths({'cloud_dir': self.tmp})
-
-    @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_detach_event',
-                autospec=True)
-    @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
-    def test_nic_detach_writes_marker(self, m_writefile, m_detach):
-        """When we detect that a nic gets detached, we write a marker for it"""
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        nl_sock = mock.MagicMock()
-        dsa._wait_for_nic_detach(nl_sock)
-        m_detach.assert_called_with(nl_sock)
-        self.assertEqual(1, m_detach.call_count)
-        m_writefile.assert_called_with(
-            dsaz.REPROVISION_NIC_DETACHED_MARKER_FILE, mock.ANY)
-
-    @mock.patch(MOCKPATH + 'util.write_file', autospec=True)
-    @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
-    @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
-    @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
-    def test_detect_nic_attach_reports_ready_and_waits_for_detach(
-            self, m_detach, m_report_ready, m_dhcp, m_fallback_if,
-            m_writefile):
-        """Report ready first and then wait for nic detach"""
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        dsa._wait_for_all_nics_ready()
-        m_fallback_if.return_value = "Dummy interface"
-        self.assertEqual(1, m_report_ready.call_count)
-        self.assertEqual(1, m_detach.call_count)
-        self.assertEqual(1, m_writefile.call_count)
-        self.assertEqual(1, m_dhcp.call_count)
-        m_writefile.assert_called_with(dsaz.REPORTED_READY_MARKER_FILE,
-                                       mock.ANY)
-
-    @mock.patch('os.path.isfile')
-    @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
-    @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
-    @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
-    def test_detect_nic_attach_skips_report_ready_when_marker_present(
-            self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
-        """Skip reporting ready if we already have a marker file."""
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-
-        def isfile(key):
-            return key == dsaz.REPORTED_READY_MARKER_FILE
-
-        m_isfile.side_effect = isfile
-        dsa._wait_for_all_nics_ready()
-        m_fallback_if.return_value = "Dummy interface"
-        self.assertEqual(0, m_report_ready.call_count)
-        self.assertEqual(0, m_dhcp.call_count)
-        self.assertEqual(1, m_detach.call_count)
-
-    @mock.patch('os.path.isfile')
-    @mock.patch(MOCKPATH + 'DataSourceAzure.fallback_interface')
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4WithReporting')
-    @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
-    @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
-    def test_detect_nic_attach_skips_nic_detach_when_marker_present(
-            self, m_detach, m_report_ready, m_dhcp, m_fallback_if, m_isfile):
-        """Skip wait for nic detach if it already happened."""
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-
-        m_isfile.return_value = True
-        dsa._wait_for_all_nics_ready()
-        m_fallback_if.return_value = "Dummy interface"
-        self.assertEqual(0, m_report_ready.call_count)
-        self.assertEqual(0, m_dhcp.call_count)
-        self.assertEqual(0, m_detach.call_count)
-
-    @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up', autospec=True)
-    @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
-    @mock.patch('cloudinit.sources.net.find_fallback_nic')
-    @mock.patch(MOCKPATH + 'get_metadata_from_imds')
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
-    @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
-    @mock.patch('os.path.isfile')
-    def test_wait_for_nic_attach_if_no_fallback_interface(
-            self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
-            m_attach, m_link_up):
-        """Wait for nic attach if we do not have a fallback interface"""
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        lease = {
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}
-
-        m_isfile.return_value = True
-        m_attach.return_value = "eth0"
-        dhcp_ctx = mock.MagicMock(lease=lease)
-        dhcp_ctx.obtain_lease.return_value = lease
-        m_dhcpv4.return_value = dhcp_ctx
-        m_imds.return_value = IMDS_NETWORK_METADATA
-        m_fallback_if.return_value = None
-
-        dsa._wait_for_all_nics_ready()
-
-        self.assertEqual(0, m_detach.call_count)
-        self.assertEqual(1, m_attach.call_count)
-        self.assertEqual(1, m_dhcpv4.call_count)
-        self.assertEqual(1, m_imds.call_count)
-        self.assertEqual(1, m_link_up.call_count)
-        m_link_up.assert_called_with(mock.ANY, "eth0")
-
-    @mock.patch(MOCKPATH + 'DataSourceAzure.wait_for_link_up')
-    @mock.patch('cloudinit.sources.helpers.netlink.wait_for_nic_attach_event')
-    @mock.patch('cloudinit.sources.net.find_fallback_nic')
-    @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
-    @mock.patch(MOCKPATH + 'DataSourceAzure._wait_for_nic_detach')
-    @mock.patch('os.path.isfile')
-    def test_wait_for_nic_attach_multinic_attach(
-            self, m_isfile, m_detach, m_dhcpv4, m_imds, m_fallback_if,
-            m_attach, m_link_up):
-        """Wait for multiple NICs to attach and detect the primary one."""
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        lease = {
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}
-        m_attach_call_count = 0
-
-        def nic_attach_ret(nl_sock, nics_found):
-            nonlocal m_attach_call_count
-            m_attach_call_count = m_attach_call_count + 1
-            if m_attach_call_count == 1:
-                return "eth0"
-            elif m_attach_call_count == 2:
-                return "eth1"
-            raise RuntimeError("Must have found primary nic by now.")
-
-        # Simulate two NICs by adding the same one twice.
-        md = {
-            "interface": [
-                IMDS_NETWORK_METADATA['interface'][0],
-                IMDS_NETWORK_METADATA['interface'][0]
-            ]
-        }
-
-        def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
-            if ifname == "eth0":
-                return md
-            raise requests.Timeout('Fake connection timeout')
-
-        m_isfile.return_value = True
-        m_attach.side_effect = nic_attach_ret
-        dhcp_ctx = mock.MagicMock(lease=lease)
-        dhcp_ctx.obtain_lease.return_value = lease
-        m_dhcpv4.return_value = dhcp_ctx
-        m_imds.side_effect = network_metadata_ret
-        m_fallback_if.return_value = None
-
-        dsa._wait_for_all_nics_ready()
-
-        self.assertEqual(0, m_detach.call_count)
-        self.assertEqual(2, m_attach.call_count)
-        # DHCP and network metadata calls will only happen on the primary NIC.
-        self.assertEqual(1, m_dhcpv4.call_count)
-        self.assertEqual(1, m_imds.call_count)
-        self.assertEqual(2, m_link_up.call_count)
-
-    @mock.patch(MOCKPATH + 'DataSourceAzure.get_imds_data_with_api_fallback')
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
-    def test_check_if_nic_is_primary_retries_on_failures(
-            self, m_dhcpv4, m_imds):
-        """Retry polling for network metadata on all failures except timeout
-           and network unreachable errors"""
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        lease = {
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}
-
-        eth0Retries = []
-        eth1Retries = []
-        # Simulate two NICs by adding the same one twice.
-        md = {
-            "interface": [
-                IMDS_NETWORK_METADATA['interface'][0],
-                IMDS_NETWORK_METADATA['interface'][0]
-            ]
-        }
-
-        def network_metadata_ret(ifname, retries, type, exc_cb, infinite):
-            nonlocal eth0Retries, eth1Retries
-
-            # Simulate readurl functionality with retries and
-            # exception callbacks so that the callback logic can be
-            # validated.
-            if ifname == "eth0":
-                cause = requests.HTTPError()
-                for _ in range(0, 15):
-                    error = url_helper.UrlError(cause=cause, code=410)
-                    eth0Retries.append(exc_cb("No goal state.", error))
-            else:
-                for _ in range(0, 10):
-                    # We are expected to retry for a certain period for both
-                    # timeout errors and network unreachable errors.
-                    if _ < 5:
-                        cause = requests.Timeout('Fake connection timeout')
-                    else:
-                        cause = requests.ConnectionError('Network Unreachable')
-                    error = url_helper.UrlError(cause=cause)
-                    eth1Retries.append(exc_cb("Connection timeout", error))
-                # Should stop retrying after 10 retries
-                eth1Retries.append(exc_cb("Connection timeout", error))
-                raise cause
-            return md
-
-        m_imds.side_effect = network_metadata_ret
-
-        dhcp_ctx = mock.MagicMock(lease=lease)
-        dhcp_ctx.obtain_lease.return_value = lease
-        m_dhcpv4.return_value = dhcp_ctx
-
-        is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth0")
-        self.assertEqual(True, is_primary)
-        self.assertEqual(2, expected_nic_count)
-
-        # All Eth0 errors are non-timeout errors. So we should have been
-        # retrying indefinitely until success.
-        for i in eth0Retries:
-            self.assertTrue(i)
-
-        is_primary, expected_nic_count = dsa._check_if_nic_is_primary("eth1")
-        self.assertEqual(False, is_primary)
-
-        # All Eth1 errors are timeout errors. Retry happens for a max of 10 and
-        # then we should have moved on assuming it is not the primary nic.
-        for i in range(0, 10):
-            self.assertTrue(eth1Retries[i])
-        self.assertFalse(eth1Retries[10])
-
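
The eth0/eth1 retry assertions above encode an exception-callback policy for the IMDS poll. A sketch of one way to express it, assuming UrlError-style exceptions that carry the underlying requests error in a .cause attribute:

    import requests

    def make_imds_exc_cb(max_connection_errors=10):
        state = {'connection_errors': 0}

        def exc_cb(msg, exception):
            # Timeouts and unreachable-network errors are retried a
            # bounded number of times before the NIC is assumed not to
            # be primary; other errors (e.g. HTTP 410 while awaiting
            # the goal state) are retried indefinitely.
            if isinstance(getattr(exception, 'cause', None),
                          (requests.Timeout, requests.ConnectionError)):
                state['connection_errors'] += 1
                return state['connection_errors'] <= max_connection_errors
            return True

        return exc_cb
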
-    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
-    def test_wait_for_link_up_returns_if_already_up(
-            self, m_is_link_up):
-        """Waiting for link to be up should return immediately if the link is
-           already up."""
-
-        distro_cls = distros.fetch('ubuntu')
-        distro = distro_cls('ubuntu', {}, self.paths)
-        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-        m_is_link_up.return_value = True
-
-        dsa.wait_for_link_up("eth0")
-        self.assertEqual(1, m_is_link_up.call_count)
-
-    @mock.patch(MOCKPATH + 'net.is_up', autospec=True)
-    @mock.patch(MOCKPATH + 'util.write_file')
-    @mock.patch('cloudinit.net.read_sys_net')
-    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
-    def test_wait_for_link_up_checks_link_after_sleep(
-            self, m_try_set_link_up, m_read_sys_net, m_writefile, m_is_up):
-        """Waiting for link to be up should check the link state again
-           after sleeping when the first attempt to bring it up fails."""
-
-        distro_cls = distros.fetch('ubuntu')
-        distro = distro_cls('ubuntu', {}, self.paths)
-        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-        m_try_set_link_up.return_value = False
-
-        callcount = 0
-
-        def is_up_mock(key):
-            nonlocal callcount
-            if callcount == 0:
-                callcount += 1
-                return False
-            return True
-
-        m_is_up.side_effect = is_up_mock
-
-        dsa.wait_for_link_up("eth0")
-        self.assertEqual(2, m_try_set_link_up.call_count)
-        self.assertEqual(2, m_is_up.call_count)
-
-    @mock.patch(MOCKPATH + 'util.write_file')
-    @mock.patch('cloudinit.net.read_sys_net')
-    @mock.patch('cloudinit.distros.networking.LinuxNetworking.try_set_link_up')
-    def test_wait_for_link_up_writes_to_device_file(
-            self, m_is_link_up, m_read_sys_net, m_writefile):
-        """Waiting for link to be up should fall back to writing to the
-           device file when the link does not come up on its own."""
-
-        distro_cls = distros.fetch('ubuntu')
-        distro = distro_cls('ubuntu', {}, self.paths)
-        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-
-        callcount = 0
-
-        def linkup(key):
-            nonlocal callcount
-            if callcount == 0:
-                callcount += 1
-                return False
-            return True
-
-        m_is_link_up.side_effect = linkup
-
-        dsa.wait_for_link_up("eth0")
-        self.assertEqual(2, m_is_link_up.call_count)
-        self.assertEqual(1, m_read_sys_net.call_count)
-        self.assertEqual(2, m_writefile.call_count)
-
-    @mock.patch('cloudinit.sources.helpers.netlink.'
-                'create_bound_netlink_socket')
-    def test_wait_for_all_nics_ready_raises_if_socket_fails(self, m_socket):
-        """Waiting for all nics should raise exception if netlink socket
-           creation fails."""
-
-        m_socket.side_effect = netlink.NetlinkCreateSocketError
-        distro_cls = distros.fetch('ubuntu')
-        distro = distro_cls('ubuntu', {}, self.paths)
-        dsa = dsaz.DataSourceAzure({}, distro=distro, paths=self.paths)
-
-        self.assertRaises(netlink.NetlinkCreateSocketError,
-                          dsa._wait_for_all_nics_ready)
-
-
-@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-@mock.patch('cloudinit.sources.helpers.netlink.'
-            'wait_for_media_disconnect_connect')
-@mock.patch('requests.Session.request')
-@mock.patch(MOCKPATH + 'DataSourceAzure._report_ready')
-class TestPreprovisioningPollIMDS(CiTestCase):
-
-    def setUp(self):
-        super(TestPreprovisioningPollIMDS, self).setUp()
-        self.tmp = self.tmp_dir()
-        self.waagent_d = self.tmp_path('/var/lib/waagent', self.tmp)
-        self.paths = helpers.Paths({'cloud_dir': self.tmp})
-        dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
-
-    @mock.patch('time.sleep', mock.MagicMock())
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
-    def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, m_report_ready,
-                                          m_request, m_media_switch, m_dhcp,
-                                          m_net):
-        """The poll_imds will retry DHCP on IMDS timeout."""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        lease = {
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}
-        m_dhcp.return_value = [lease]
-        m_media_switch.return_value = None
-        dhcp_ctx = mock.MagicMock(lease=lease)
-        dhcp_ctx.obtain_lease.return_value = lease
-        m_dhcpv4.return_value = dhcp_ctx
-
-        self.tries = 0
-
-        def fake_timeout_once(**kwargs):
-            self.tries += 1
-            if self.tries == 1:
-                raise requests.Timeout('Fake connection timeout')
-            elif self.tries in (2, 3):
-                response = requests.Response()
-                response.status_code = 404 if self.tries == 2 else 410
-                raise requests.exceptions.HTTPError(
-                    "fake {}".format(response.status_code), response=response
-                )
-            # Fourth try should succeed and stop retries or re-dhcp
-            return mock.MagicMock(status_code=200, text="good", content="good")
-
-        m_request.side_effect = fake_timeout_once
-
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            dsa._poll_imds()
-        self.assertEqual(m_report_ready.call_count, 1)
-        m_report_ready.assert_called_with(lease=lease)
-        self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls')
-        self.assertEqual(4, self.tries, 'Expected 4 total reads from IMDS')
-
-    @mock.patch('os.path.isfile')
-    def test_poll_imds_skips_dhcp_if_ctx_present(
-            self, m_isfile, report_ready_func, fake_resp, m_media_switch,
-            m_dhcp, m_net):
-        """The poll_imds function should reuse the dhcp ctx if it is already
-           present. This happens when we wait for nic to be hot-attached before
-           polling for reprovisiondata. Note that if this ctx is set when
-           _poll_imds is called, then it is not expected to be waiting for
-           media_disconnect_connect either."""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        m_isfile.return_value = True
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        dsa._ephemeral_dhcp_ctx = "Dummy dhcp ctx"
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            dsa._poll_imds()
-        self.assertEqual(0, m_dhcp.call_count)
-        self.assertEqual(0, m_media_switch.call_count)
-
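
The 'unknown-245' value threaded through these leases is DHCP option 245, which on Azure carries the wireserver endpoint as four raw octets. The decode is a one-liner; the '624c3620' used throughout these tests comes out as 98.76.54.32 (the helper name below is illustrative):

    def wireserver_ip_from_option_245(hex_value):
        # Four hex octets to dotted-quad, e.g. '624c3620' -> '98.76.54.32'.
        return '.'.join(str(octet) for octet in bytes.fromhex(hex_value))
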
-    @mock.patch('os.path.isfile')
-    @mock.patch(MOCKPATH + 'EphemeralDHCPv4')
-    def test_poll_imds_does_dhcp_on_retries_if_ctx_present(
-            self, m_ephemeral_dhcpv4, m_isfile, report_ready_func, m_request,
-            m_media_switch, m_dhcp, m_net):
-        """The poll_imds function should reuse the dhcp ctx if it is already
-           present. This happens when we wait for nic to be hot-attached before
-           polling for reprovisiondata. Note that if this ctx is set when
-           _poll_imds is called, then it is not expected to be waiting for
-           media_disconnect_connect either."""
-
-        tries = 0
-
-        def fake_timeout_once(**kwargs):
-            nonlocal tries
-            tries += 1
-            if tries == 1:
-                raise requests.Timeout('Fake connection timeout')
-            return mock.MagicMock(status_code=200, text="good", content="good")
-
-        m_request.side_effect = fake_timeout_once
-        report_file = self.tmp_path('report_marker', self.tmp)
-        m_isfile.return_value = True
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file),\
-                mock.patch.object(dsa, '_ephemeral_dhcp_ctx') as m_dhcp_ctx:
-            m_dhcp_ctx.obtain_lease.return_value = "Dummy lease"
-            dsa._ephemeral_dhcp_ctx = m_dhcp_ctx
-            dsa._poll_imds()
-        self.assertEqual(1, m_dhcp_ctx.clean_network.call_count)
-        self.assertEqual(1, m_ephemeral_dhcpv4.call_count)
-        self.assertEqual(0, m_media_switch.call_count)
-        self.assertEqual(2, m_request.call_count)
-
-    def test_does_not_poll_imds_report_ready_when_marker_file_exists(
-            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
-        """poll_imds should not call report ready when the reported ready
-           marker file exists"""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        write_file(report_file, content='dont run report_ready :)')
-        m_dhcp.return_value = [{
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}]
-        m_media_switch.return_value = None
-        dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths)
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            dsa._poll_imds()
-        self.assertEqual(m_report_ready.call_count, 0)
-
-    def test_poll_imds_report_ready_success_writes_marker_file(
-            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
-        """poll_imds should write the report_ready marker file if
-           reporting ready succeeds"""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        m_dhcp.return_value = [{
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}]
-        m_media_switch.return_value = None
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        self.assertFalse(os.path.exists(report_file))
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            dsa._poll_imds()
-        self.assertEqual(m_report_ready.call_count, 1)
-        self.assertTrue(os.path.exists(report_file))
-
-    def test_poll_imds_report_ready_failure_raises_exc_and_doesnt_write_marker(
-            self, m_report_ready, m_request, m_media_switch, m_dhcp, m_net):
-        """poll_imds should raise an exception and not write the
-           report_ready marker file if reporting ready fails"""
-        report_file = self.tmp_path('report_marker', self.tmp)
-        m_dhcp.return_value = [{
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'unknown-245': '624c3620'}]
-        m_media_switch.return_value = None
-        m_report_ready.return_value = False
-        dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths)
-        self.assertFalse(os.path.exists(report_file))
-        with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file):
-            self.assertRaises(
-                InvalidMetaDataException,
-                dsa._poll_imds)
-        self.assertEqual(m_report_ready.call_count, 1)
-        self.assertFalse(os.path.exists(report_file))
-
-
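
The four marker-file tests above fix a once-only contract around reporting ready. A minimal sketch of that contract, with RuntimeError standing in for the datasource's InvalidMetaDataException:

    import os

    def report_ready_once(marker_file, report_ready):
        if os.path.isfile(marker_file):
            return  # already reported on a previous pass
        if not report_ready():
            raise RuntimeError('reporting ready failed')  # no marker written
        with open(marker_file, 'w') as stream:
            stream.write('reported ready')
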
-@mock.patch(MOCKPATH + 'subp.subp', mock.MagicMock()) -@mock.patch(MOCKPATH + 'util.write_file', mock.MagicMock()) -@mock.patch('cloudinit.sources.helpers.netlink.' - 'wait_for_media_disconnect_connect') -@mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network', autospec=True) -@mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') -@mock.patch('requests.Session.request') -class TestAzureDataSourcePreprovisioning(CiTestCase): - - def setUp(self): - super(TestAzureDataSourcePreprovisioning, self).setUp() - tmp = self.tmp_dir() - self.waagent_d = self.tmp_path('/var/lib/waagent', tmp) - self.paths = helpers.Paths({'cloud_dir': tmp}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - - def test_poll_imds_returns_ovf_env(self, m_request, - m_dhcp, m_net, - m_media_switch): - """The _poll_imds method should return the ovf_env.xml.""" - m_media_switch.return_value = None - m_dhcp.return_value = [{ - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] - url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01' - host = "169.254.169.254" - full_url = url.format(host) - m_request.return_value = mock.MagicMock(status_code=200, text="ovf", - content="ovf") - dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - self.assertTrue(len(dsa._poll_imds()) > 0) - self.assertEqual(m_request.call_args_list, - [mock.call(allow_redirects=True, - headers={'Metadata': 'true', - 'User-Agent': - 'Cloud-Init/%s' % vs() - }, method='GET', - timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, - url=full_url)]) - self.assertEqual(m_dhcp.call_count, 2) - m_net.assert_any_call( - broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1', - static_routes=None) - self.assertEqual(m_net.call_count, 2) - - def test__reprovision_calls__poll_imds(self, m_request, - m_dhcp, m_net, - m_media_switch): - """The _reprovision method should call poll IMDS.""" - m_media_switch.return_value = None - m_dhcp.return_value = [{ - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'unknown-245': '624c3620'}] - url = 'http://{0}/metadata/reprovisiondata?api-version=2019-06-01' - host = "169.254.169.254" - full_url = url.format(host) - hostname = "myhost" - username = "myuser" - odata = {'HostName': hostname, 'UserName': username} - content = construct_valid_ovf_env(data=odata) - m_request.return_value = mock.MagicMock(status_code=200, text=content, - content=content) - dsa = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - md, _ud, cfg, _d = dsa._reprovision() - self.assertEqual(md['local-hostname'], hostname) - self.assertEqual(cfg['system_info']['default_user']['name'], username) - self.assertIn( - mock.call( - allow_redirects=True, - headers={ - 'Metadata': 'true', - 'User-Agent': 'Cloud-Init/%s' % vs() - }, - method='GET', - timeout=dsaz.IMDS_TIMEOUT_IN_SECONDS, - url=full_url - ), - m_request.call_args_list) - self.assertEqual(m_dhcp.call_count, 2) - m_net.assert_any_call( - broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', - prefix_or_mask='255.255.255.0', router='192.168.2.1', - static_routes=None) - self.assertEqual(m_net.call_count, 2) - - -class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestRemoveUbuntuNetworkConfigScripts, self).setUp() - self.tmp = self.tmp_dir() - - def test_remove_network_scripts_removes_both_files_and_directories(self): 
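# The preprovisioning tests above pin down the exact IMDS request shape. A
# rough stand-alone equivalent using the requests library (illustrative
# only; cloud-init issues this call through its url_helper wrapper, with
# dsaz.IMDS_TIMEOUT_IN_SECONDS as the timeout):
#
#     import requests
#
#     def fetch_reprovision_data(host='169.254.169.254'):
#         url = ('http://%s/metadata/reprovisiondata'
#                '?api-version=2019-06-01' % host)
#         # IMDS rejects requests that lack the Metadata: true header.
#         return requests.get(
#             url, headers={'Metadata': 'true'}, timeout=2)  # value illustrative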
- """Any files or directories in paths are removed when present.""" - file1 = self.tmp_path('file1', dir=self.tmp) - subdir = self.tmp_path('sub1', dir=self.tmp) - subfile = self.tmp_path('leaf1', dir=subdir) - write_file(file1, 'file1content') - write_file(subfile, 'leafcontent') - dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[subdir, file1]) - - for path in (file1, subdir, subfile): - self.assertFalse(os.path.exists(path), - 'Found unremoved: %s' % path) - - expected_logs = [ - 'INFO: Removing Ubuntu extended network scripts because cloud-init' - ' updates Azure network configuration on the following events:' - " ['boot', 'boot-legacy']", - 'Recursively deleting %s' % subdir, - 'Attempting to remove %s' % file1] - for log in expected_logs: - self.assertIn(log, self.logs.getvalue()) - - def test_remove_network_scripts_only_attempts_removal_if_path_exists(self): - """Any files or directories absent are skipped without error.""" - dsaz.maybe_remove_ubuntu_network_config_scripts(paths=[ - self.tmp_path('nodirhere/', dir=self.tmp), - self.tmp_path('notfilehere', dir=self.tmp)]) - self.assertNotIn('/not/a', self.logs.getvalue()) # No delete logs - - @mock.patch(MOCKPATH + 'os.path.exists') - def test_remove_network_scripts_default_removes_stock_scripts(self, - m_exists): - """Azure's stock ubuntu image scripts and artifacts are removed.""" - # Report path absent on all to avoid delete operation - m_exists.return_value = False - dsaz.maybe_remove_ubuntu_network_config_scripts() - calls = m_exists.call_args_list - for path in dsaz.UBUNTU_EXTENDED_NETWORK_SCRIPTS: - self.assertIn(mock.call(path), calls) - - -class TestWBIsPlatformViable(CiTestCase): - """White box tests for _is_platform_viable.""" - with_logs = True - - @mock.patch(MOCKPATH + 'dmi.read_dmi_data') - def test_true_on_non_azure_chassis(self, m_read_dmi_data): - """Return True if DMI chassis-asset-tag is AZURE_CHASSIS_ASSET_TAG.""" - m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG - self.assertTrue(dsaz._is_platform_viable('doesnotmatter')) - - @mock.patch(MOCKPATH + 'os.path.exists') - @mock.patch(MOCKPATH + 'dmi.read_dmi_data') - def test_true_on_azure_ovf_env_in_seed_dir(self, m_read_dmi_data, m_exist): - """Return True if ovf-env.xml exists in known seed dirs.""" - # Non-matching Azure chassis-asset-tag - m_read_dmi_data.return_value = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' - - m_exist.return_value = True - self.assertTrue(dsaz._is_platform_viable('/some/seed/dir')) - m_exist.called_once_with('/other/seed/dir') - - def test_false_on_no_matching_azure_criteria(self): - """Report non-azure on unmatched asset tag, ovf-env absent and no dev. - - Return False when the asset tag doesn't match Azure's static - AZURE_CHASSIS_ASSET_TAG, no ovf-env.xml files exist in known seed dirs - and no devices have a label starting with prefix 'rd_rdfe_'. 
- """ - self.assertFalse(wrap_and_call( - MOCKPATH, - {'os.path.exists': False, - # Non-matching Azure chassis-asset-tag - 'dmi.read_dmi_data': dsaz.AZURE_CHASSIS_ASSET_TAG + 'X', - 'subp.which': None}, - dsaz._is_platform_viable, 'doesnotmatter')) - self.assertIn( - "DEBUG: Non-Azure DMI asset tag '{0}' discovered.\n".format( - dsaz.AZURE_CHASSIS_ASSET_TAG + 'X'), - self.logs.getvalue()) - - -class TestRandomSeed(CiTestCase): - """Test proper handling of random_seed""" - - def test_non_ascii_seed_is_serializable(self): - """Pass if a random string from the Azure infrastructure which - contains at least one non-Unicode character can be converted to/from - JSON without alteration and without throwing an exception. - """ - path = resourceLocation("azure/non_unicode_random_string") - result = dsaz._get_random_seed(path) - - obj = {'seed': result} - try: - serialized = json_dumps(obj) - deserialized = load_json(serialized) - except UnicodeDecodeError: - self.fail("Non-serializable random seed returned") - - self.assertEqual(deserialized['seed'], result) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py deleted file mode 100644 index ab4f0b50..00000000 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ /dev/null @@ -1,1441 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import copy -import os -import re -import unittest -from textwrap import dedent -from xml.etree import ElementTree -from xml.sax.saxutils import escape, unescape - -from cloudinit.sources.helpers import azure as azure_helper -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir - -from cloudinit.util import load_file -from cloudinit.sources.helpers.azure import WALinuxAgentShim as wa_shim - -GOAL_STATE_TEMPLATE = """\ - - - 2012-11-30 - {incarnation} - - Started - 300000 - - 16001 - - FALSE - - - {container_id} - - - {instance_id} - Started - - - http://100.86.192.70:80/...hostingEnvironmentConfig... - - http://100.86.192.70:80/..SharedConfig.. - - http://100.86.192.70:80/...extensionsConfig... - - http://100.86.192.70:80/...fullConfig... - {certificates_url} - 68ce47.0.68ce47.0.utl-trusty--292258.1.xml - - - - - -""" - -HEALTH_REPORT_XML_TEMPLATE = '''\ - - - {incarnation} - - {container_id} - - - {instance_id} - - {health_status} - {health_detail_subsection} - - - - - -''' - -HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = dedent('''\ -
- {health_substatus} - {health_description} -
- ''') - -HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512 - - -class SentinelException(Exception): - pass - - -class TestFindEndpoint(CiTestCase): - - def setUp(self): - super(TestFindEndpoint, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.load_file = patches.enter_context( - mock.patch.object(azure_helper.util, 'load_file')) - - self.dhcp_options = patches.enter_context( - mock.patch.object(wa_shim, '_load_dhclient_json')) - - self.networkd_leases = patches.enter_context( - mock.patch.object(wa_shim, '_networkd_get_value_from_leases')) - self.networkd_leases.return_value = None - - def test_missing_file(self): - """wa_shim find_endpoint uses default endpoint if leasefile not found - """ - self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") - - def test_missing_special_azure_line(self): - """wa_shim find_endpoint uses default endpoint if leasefile is found - but does not contain DHCP Option 245 (whose value is the endpoint) - """ - self.load_file.return_value = '' - self.dhcp_options.return_value = {'eth0': {'key': 'value'}} - self.assertEqual(wa_shim.find_endpoint(), "168.63.129.16") - - @staticmethod - def _build_lease_content(encoded_address): - endpoint = azure_helper._get_dhcp_endpoint_option_name() - return '\n'.join([ - 'lease {', - ' interface "eth0";', - ' option {0} {1};'.format(endpoint, encoded_address), - '}']) - - def test_from_dhcp_client(self): - self.dhcp_options.return_value = {"eth0": {"unknown_245": "5:4:3:2"}} - self.assertEqual('5.4.3.2', wa_shim.find_endpoint(None)) - - def test_latest_lease_used(self): - encoded_addresses = ['5:4:3:2', '4:3:2:1'] - file_content = '\n'.join([self._build_lease_content(encoded_address) - for encoded_address in encoded_addresses]) - self.load_file.return_value = file_content - self.assertEqual(encoded_addresses[-1].replace(':', '.'), - wa_shim.find_endpoint("foobar")) - - -class TestExtractIpAddressFromLeaseValue(CiTestCase): - - def test_hex_string(self): - ip_address, encoded_address = '98.76.54.32', '62:4c:36:20' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - def test_hex_string_with_single_character_part(self): - ip_address, encoded_address = '4.3.2.1', '4:3:2:1' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - def test_packed_string(self): - ip_address, encoded_address = '98.76.54.32', 'bL6 ' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - def test_packed_string_with_escaped_quote(self): - ip_address, encoded_address = '100.72.34.108', 'dH\\"l' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - def test_packed_string_containing_a_colon(self): - ip_address, encoded_address = '100.72.58.108', 'dH:l' - self.assertEqual( - ip_address, wa_shim.get_ip_from_lease_value(encoded_address)) - - -class TestGoalStateParsing(CiTestCase): - - default_parameters = { - 'incarnation': 1, - 'container_id': 'MyContainerId', - 'instance_id': 'MyInstanceId', - 'certificates_url': 'MyCertificatesUrl', - } - - def _get_formatted_goal_state_xml_string(self, **kwargs): - parameters = self.default_parameters.copy() - parameters.update(kwargs) - xml = GOAL_STATE_TEMPLATE.format(**parameters) - if parameters['certificates_url'] is None: - new_xml_lines = [] - for line in xml.splitlines(): - if 'Certificates' in line: - continue - new_xml_lines.append(line) - xml = '\n'.join(new_xml_lines) - return xml - - def _get_goal_state(self, m_azure_endpoint_client=None, **kwargs): - 
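# The parsing tests below exercise azure_helper.GoalState, which pulls
# fields out of the goal-state document with plain ElementTree paths. A
# minimal sketch of that extraction (hypothetical helper name):
#
#     from xml.etree import ElementTree
#
#     def goal_state_field(xml_text, path):
#         # e.g. path='./Container/ContainerId' returns the container id
#         return ElementTree.fromstring(xml_text).findtext(path)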
if m_azure_endpoint_client is None:
-            m_azure_endpoint_client = mock.MagicMock()
-        xml = self._get_formatted_goal_state_xml_string(**kwargs)
-        return azure_helper.GoalState(xml, m_azure_endpoint_client)
-
-    def test_incarnation_parsed_correctly(self):
-        incarnation = '123'
-        goal_state = self._get_goal_state(incarnation=incarnation)
-        self.assertEqual(incarnation, goal_state.incarnation)
-
-    def test_container_id_parsed_correctly(self):
-        container_id = 'TestContainerId'
-        goal_state = self._get_goal_state(container_id=container_id)
-        self.assertEqual(container_id, goal_state.container_id)
-
-    def test_instance_id_parsed_correctly(self):
-        instance_id = 'TestInstanceId'
-        goal_state = self._get_goal_state(instance_id=instance_id)
-        self.assertEqual(instance_id, goal_state.instance_id)
-
-    def test_instance_id_byte_swap(self):
-        """Return true when previous_iid is byteswapped current_iid"""
-        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        current_iid = "544CDFD0-CB4E-4B4A-9954-5BDF3ED5C3B8"
-        self.assertTrue(
-            azure_helper.is_byte_swapped(previous_iid, current_iid))
-
-    def test_instance_id_no_byte_swap_same_instance_id(self):
-        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        current_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        self.assertFalse(
-            azure_helper.is_byte_swapped(previous_iid, current_iid))
-
-    def test_instance_id_no_byte_swap_diff_instance_id(self):
-        previous_iid = "D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        current_iid = "G0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8"
-        self.assertFalse(
-            azure_helper.is_byte_swapped(previous_iid, current_iid))
-
-    def test_certificates_xml_parsed_and_fetched_correctly(self):
-        m_azure_endpoint_client = mock.MagicMock()
-        certificates_url = 'TestCertificatesUrl'
-        goal_state = self._get_goal_state(
-            m_azure_endpoint_client=m_azure_endpoint_client,
-            certificates_url=certificates_url)
-        certificates_xml = goal_state.certificates_xml
-        self.assertEqual(1, m_azure_endpoint_client.get.call_count)
-        self.assertEqual(
-            certificates_url,
-            m_azure_endpoint_client.get.call_args[0][0])
-        self.assertTrue(
-            m_azure_endpoint_client.get.call_args[1].get(
-                'secure', False))
-        self.assertEqual(
-            m_azure_endpoint_client.get.return_value.contents,
-            certificates_xml)
-
-    def test_missing_certificates_skips_http_get(self):
-        m_azure_endpoint_client = mock.MagicMock()
-        goal_state = self._get_goal_state(
-            m_azure_endpoint_client=m_azure_endpoint_client,
-            certificates_url=None)
-        certificates_xml = goal_state.certificates_xml
-        self.assertEqual(0, m_azure_endpoint_client.get.call_count)
-        self.assertIsNone(certificates_xml)
-
-    def test_invalid_goal_state_xml_raises_parse_error(self):
-        xml = 'random non-xml data'
-        with self.assertRaises(ElementTree.ParseError):
-            azure_helper.GoalState(xml, mock.MagicMock())
-
-    def test_missing_container_id_in_goal_state_xml_raises_exc(self):
-        xml = self._get_formatted_goal_state_xml_string()
-        xml = re.sub('<ContainerId>.*</ContainerId>', '', xml)
-        with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
-            azure_helper.GoalState(xml, mock.MagicMock())
-
-    def test_missing_instance_id_in_goal_state_xml_raises_exc(self):
-        xml = self._get_formatted_goal_state_xml_string()
-        xml = re.sub('<InstanceId>.*</InstanceId>', '', xml)
-        with self.assertRaises(azure_helper.InvalidGoalStateXMLException):
-            azure_helper.GoalState(xml, mock.MagicMock())
-
-    def test_missing_incarnation_in_goal_state_xml_raises_exc(self):
-        xml = self._get_formatted_goal_state_xml_string()
-        xml = re.sub('<Incarnation>.*</Incarnation>', '', xml)
-        with
self.assertRaises(azure_helper.InvalidGoalStateXMLException): - azure_helper.GoalState(xml, mock.MagicMock()) - - -class TestAzureEndpointHttpClient(CiTestCase): - - regular_headers = { - 'x-ms-agent-name': 'WALinuxAgent', - 'x-ms-version': '2012-11-30', - } - - def setUp(self): - super(TestAzureEndpointHttpClient, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - self.m_http_with_retries = patches.enter_context( - mock.patch.object(azure_helper, 'http_with_retries')) - - def test_non_secure_get(self): - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - url = 'MyTestUrl' - response = client.get(url, secure=False) - self.assertEqual(1, self.m_http_with_retries.call_count) - self.assertEqual(self.m_http_with_retries.return_value, response) - self.assertEqual( - mock.call(url, headers=self.regular_headers), - self.m_http_with_retries.call_args) - - def test_non_secure_get_raises_exception(self): - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - url = 'MyTestUrl' - self.m_http_with_retries.side_effect = SentinelException - self.assertRaises(SentinelException, client.get, url, secure=False) - self.assertEqual(1, self.m_http_with_retries.call_count) - - def test_secure_get(self): - url = 'MyTestUrl' - m_certificate = mock.MagicMock() - expected_headers = self.regular_headers.copy() - expected_headers.update({ - "x-ms-cipher-name": "DES_EDE3_CBC", - "x-ms-guest-agent-public-x509-cert": m_certificate, - }) - client = azure_helper.AzureEndpointHttpClient(m_certificate) - response = client.get(url, secure=True) - self.assertEqual(1, self.m_http_with_retries.call_count) - self.assertEqual(self.m_http_with_retries.return_value, response) - self.assertEqual( - mock.call(url, headers=expected_headers), - self.m_http_with_retries.call_args) - - def test_secure_get_raises_exception(self): - url = 'MyTestUrl' - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - self.m_http_with_retries.side_effect = SentinelException - self.assertRaises(SentinelException, client.get, url, secure=True) - self.assertEqual(1, self.m_http_with_retries.call_count) - - def test_post(self): - m_data = mock.MagicMock() - url = 'MyTestUrl' - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - response = client.post(url, data=m_data) - self.assertEqual(1, self.m_http_with_retries.call_count) - self.assertEqual(self.m_http_with_retries.return_value, response) - self.assertEqual( - mock.call(url, data=m_data, headers=self.regular_headers), - self.m_http_with_retries.call_args) - - def test_post_raises_exception(self): - m_data = mock.MagicMock() - url = 'MyTestUrl' - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - self.m_http_with_retries.side_effect = SentinelException - self.assertRaises(SentinelException, client.post, url, data=m_data) - self.assertEqual(1, self.m_http_with_retries.call_count) - - def test_post_with_extra_headers(self): - url = 'MyTestUrl' - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - extra_headers = {'test': 'header'} - client.post(url, extra_headers=extra_headers) - expected_headers = self.regular_headers.copy() - expected_headers.update(extra_headers) - self.assertEqual(1, self.m_http_with_retries.call_count) - self.assertEqual( - mock.call(url, data=mock.ANY, headers=expected_headers), - self.m_http_with_retries.call_args) - - def test_post_with_sleep_with_extra_headers_raises_exception(self): - m_data = mock.MagicMock() - url = 'MyTestUrl' - extra_headers = {'test': 'header'} - client = 
azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - self.m_http_with_retries.side_effect = SentinelException - self.assertRaises( - SentinelException, client.post, - url, data=m_data, extra_headers=extra_headers) - self.assertEqual(1, self.m_http_with_retries.call_count) - - -class TestAzureHelperHttpWithRetries(CiTestCase): - - with_logs = True - - max_readurl_attempts = 240 - default_readurl_timeout = 5 - sleep_duration_between_retries = 5 - periodic_logging_attempts = 12 - - def setUp(self): - super(TestAzureHelperHttpWithRetries, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.m_readurl = patches.enter_context( - mock.patch.object( - azure_helper.url_helper, 'readurl', mock.MagicMock())) - self.m_sleep = patches.enter_context( - mock.patch.object(azure_helper.time, 'sleep', autospec=True)) - - def test_http_with_retries(self): - self.m_readurl.return_value = 'TestResp' - self.assertEqual( - azure_helper.http_with_retries('testurl'), - self.m_readurl.return_value) - self.assertEqual(self.m_readurl.call_count, 1) - - def test_http_with_retries_propagates_readurl_exc_and_logs_exc( - self): - self.m_readurl.side_effect = SentinelException - - self.assertRaises( - SentinelException, azure_helper.http_with_retries, 'testurl') - self.assertEqual(self.m_readurl.call_count, self.max_readurl_attempts) - - self.assertIsNotNone( - re.search( - r'Failed HTTP request with Azure endpoint \S* during ' - r'attempt \d+ with exception: \S*', - self.logs.getvalue())) - self.assertIsNone( - re.search( - r'Successful HTTP request with Azure endpoint \S* after ' - r'\d+ attempts', - self.logs.getvalue())) - - def test_http_with_retries_delayed_success_due_to_temporary_readurl_exc( - self): - self.m_readurl.side_effect = \ - [SentinelException] * self.periodic_logging_attempts + \ - ['TestResp'] - self.m_readurl.return_value = 'TestResp' - - response = azure_helper.http_with_retries('testurl') - self.assertEqual( - response, - self.m_readurl.return_value) - self.assertEqual( - self.m_readurl.call_count, - self.periodic_logging_attempts + 1) - - # Ensure that cloud-init did sleep between each failed request - self.assertEqual( - self.m_sleep.call_count, - self.periodic_logging_attempts) - self.m_sleep.assert_called_with(self.sleep_duration_between_retries) - - def test_http_with_retries_long_delay_logs_periodic_failure_msg(self): - self.m_readurl.side_effect = \ - [SentinelException] * self.periodic_logging_attempts + \ - ['TestResp'] - self.m_readurl.return_value = 'TestResp' - - azure_helper.http_with_retries('testurl') - - self.assertEqual( - self.m_readurl.call_count, - self.periodic_logging_attempts + 1) - self.assertIsNotNone( - re.search( - r'Failed HTTP request with Azure endpoint \S* during ' - r'attempt \d+ with exception: \S*', - self.logs.getvalue())) - self.assertIsNotNone( - re.search( - r'Successful HTTP request with Azure endpoint \S* after ' - r'\d+ attempts', - self.logs.getvalue())) - - def test_http_with_retries_short_delay_does_not_log_periodic_failure_msg( - self): - self.m_readurl.side_effect = \ - [SentinelException] * \ - (self.periodic_logging_attempts - 1) + \ - ['TestResp'] - self.m_readurl.return_value = 'TestResp' - - azure_helper.http_with_retries('testurl') - self.assertEqual( - self.m_readurl.call_count, - self.periodic_logging_attempts) - - self.assertIsNone( - re.search( - r'Failed HTTP request with Azure endpoint \S* during ' - r'attempt \d+ with exception: \S*', - self.logs.getvalue())) - self.assertIsNotNone( - re.search( - r'Successful 
HTTP request with Azure endpoint \S* after ' - r'\d+ attempts', - self.logs.getvalue())) - - def test_http_with_retries_calls_url_helper_readurl_with_args_kwargs(self): - testurl = mock.MagicMock() - kwargs = { - 'headers': mock.MagicMock(), - 'data': mock.MagicMock(), - # timeout kwarg should not be modified or deleted if present - 'timeout': mock.MagicMock() - } - azure_helper.http_with_retries(testurl, **kwargs) - self.m_readurl.assert_called_once_with(testurl, **kwargs) - - def test_http_with_retries_adds_timeout_kwarg_if_not_present(self): - testurl = mock.MagicMock() - kwargs = { - 'headers': mock.MagicMock(), - 'data': mock.MagicMock() - } - expected_kwargs = copy.deepcopy(kwargs) - expected_kwargs['timeout'] = self.default_readurl_timeout - - azure_helper.http_with_retries(testurl, **kwargs) - self.m_readurl.assert_called_once_with(testurl, **expected_kwargs) - - def test_http_with_retries_deletes_retries_kwargs_passed_in( - self): - """http_with_retries already implements retry logic, - so url_helper.readurl should not have retries. - http_with_retries should delete kwargs that - cause url_helper.readurl to retry. - """ - testurl = mock.MagicMock() - kwargs = { - 'headers': mock.MagicMock(), - 'data': mock.MagicMock(), - 'timeout': mock.MagicMock(), - 'retries': mock.MagicMock(), - 'infinite': mock.MagicMock() - } - expected_kwargs = copy.deepcopy(kwargs) - expected_kwargs.pop('retries', None) - expected_kwargs.pop('infinite', None) - - azure_helper.http_with_retries(testurl, **kwargs) - self.m_readurl.assert_called_once_with(testurl, **expected_kwargs) - self.assertIn( - 'retries kwarg passed in for communication with Azure endpoint.', - self.logs.getvalue()) - self.assertIn( - 'infinite kwarg passed in for communication with Azure endpoint.', - self.logs.getvalue()) - - -class TestOpenSSLManager(CiTestCase): - - def setUp(self): - super(TestOpenSSLManager, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.subp = patches.enter_context( - mock.patch.object(azure_helper.subp, 'subp')) - try: - self.open = patches.enter_context( - mock.patch('__builtin__.open')) - except ImportError: - self.open = patches.enter_context( - mock.patch('builtins.open')) - - @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) - @mock.patch.object(azure_helper.temp_utils, 'mkdtemp') - def test_openssl_manager_creates_a_tmpdir(self, mkdtemp): - manager = azure_helper.OpenSSLManager() - self.assertEqual(mkdtemp.return_value, manager.tmpdir) - - def test_generate_certificate_uses_tmpdir(self): - subp_directory = {} - - def capture_directory(*args, **kwargs): - subp_directory['path'] = os.getcwd() - - self.subp.side_effect = capture_directory - manager = azure_helper.OpenSSLManager() - self.assertEqual(manager.tmpdir, subp_directory['path']) - manager.clean_up() - - @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) - @mock.patch.object(azure_helper.temp_utils, 'mkdtemp', mock.MagicMock()) - @mock.patch.object(azure_helper.util, 'del_dir') - def test_clean_up(self, del_dir): - manager = azure_helper.OpenSSLManager() - manager.clean_up() - self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) - - -class TestOpenSSLManagerActions(CiTestCase): - - def setUp(self): - super(TestOpenSSLManagerActions, self).setUp() - - self.allowed_subp = True - - def _data_file(self, name): - path = 'tests/data/azure' - return os.path.join(path, name) - - @unittest.skip("todo move to cloud_test") - def test_pubkey_extract(self): - cert = 
load_file(self._data_file('pubkey_extract_cert')) - good_key = load_file(self._data_file('pubkey_extract_ssh_key')) - sslmgr = azure_helper.OpenSSLManager() - key = sslmgr._get_ssh_key_from_cert(cert) - self.assertEqual(good_key, key) - - good_fingerprint = '073E19D14D1C799224C6A0FD8DDAB6A8BF27D473' - fingerprint = sslmgr._get_fingerprint_from_cert(cert) - self.assertEqual(good_fingerprint, fingerprint) - - @unittest.skip("todo move to cloud_test") - @mock.patch.object(azure_helper.OpenSSLManager, '_decrypt_certs_from_xml') - def test_parse_certificates(self, mock_decrypt_certs): - """Azure control plane puts private keys as well as certificates - into the Certificates XML object. Make sure only the public keys - from certs are extracted and that fingerprints are converted to - the form specified in the ovf-env.xml file. - """ - cert_contents = load_file(self._data_file('parse_certificates_pem')) - fingerprints = load_file(self._data_file( - 'parse_certificates_fingerprints') - ).splitlines() - mock_decrypt_certs.return_value = cert_contents - sslmgr = azure_helper.OpenSSLManager() - keys_by_fp = sslmgr.parse_certificates('') - for fp in keys_by_fp.keys(): - self.assertIn(fp, fingerprints) - for fp in fingerprints: - self.assertIn(fp, keys_by_fp) - - -class TestGoalStateHealthReporter(CiTestCase): - - maxDiff = None - - default_parameters = { - 'incarnation': 1634, - 'container_id': 'MyContainerId', - 'instance_id': 'MyInstanceId' - } - - test_azure_endpoint = 'TestEndpoint' - test_health_report_url = 'http://{0}/machine?comp=health'.format( - test_azure_endpoint) - test_default_headers = {'Content-Type': 'text/xml; charset=utf-8'} - - provisioning_success_status = 'Ready' - provisioning_not_ready_status = 'NotReady' - provisioning_failure_substatus = 'ProvisioningFailed' - provisioning_failure_err_description = ( - 'Test error message containing provisioning failure details') - - def setUp(self): - super(TestGoalStateHealthReporter, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - patches.enter_context( - mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) - self.read_file_or_url = patches.enter_context( - mock.patch.object(azure_helper.url_helper, 'read_file_or_url')) - - self.post = patches.enter_context( - mock.patch.object(azure_helper.AzureEndpointHttpClient, - 'post')) - - self.GoalState = patches.enter_context( - mock.patch.object(azure_helper, 'GoalState')) - self.GoalState.return_value.container_id = \ - self.default_parameters['container_id'] - self.GoalState.return_value.instance_id = \ - self.default_parameters['instance_id'] - self.GoalState.return_value.incarnation = \ - self.default_parameters['incarnation'] - - def _text_from_xpath_in_xroot(self, xroot, xpath): - element = xroot.find(xpath) - if element is not None: - return element.text - return None - - def _get_formatted_health_report_xml_string(self, **kwargs): - return HEALTH_REPORT_XML_TEMPLATE.format(**kwargs) - - def _get_formatted_health_detail_subsection_xml_string(self, **kwargs): - return HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(**kwargs) - - def _get_report_ready_health_document(self): - return self._get_formatted_health_report_xml_string( - incarnation=escape(str(self.default_parameters['incarnation'])), - container_id=escape(self.default_parameters['container_id']), - instance_id=escape(self.default_parameters['instance_id']), - health_status=escape(self.provisioning_success_status), - health_detail_subsection='') - - def _get_report_failure_health_document(self): - 
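# These helpers model GoalStateHealthReporter's behavior: caller-supplied
# text is XML-escaped before being substituted into the health report
# templates. A small sketch of that step (hypothetical function name):
#
#     from xml.sax.saxutils import escape
#
#     def details_xml(substatus, description):
#         # Escaping first means <, >, & and quotes in the description
#         # cannot break the surrounding XML document.
#         return HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
#             health_substatus=escape(substatus),
#             health_description=escape(description))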
health_detail_subsection = \ - self._get_formatted_health_detail_subsection_xml_string( - health_substatus=escape(self.provisioning_failure_substatus), - health_description=escape( - self.provisioning_failure_err_description)) - - return self._get_formatted_health_report_xml_string( - incarnation=escape(str(self.default_parameters['incarnation'])), - container_id=escape(self.default_parameters['container_id']), - instance_id=escape(self.default_parameters['instance_id']), - health_status=escape(self.provisioning_not_ready_status), - health_detail_subsection=health_detail_subsection) - - def test_send_ready_signal_sends_post_request(self): - with mock.patch.object( - azure_helper.GoalStateHealthReporter, - 'build_report') as m_build_report: - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - client, self.test_azure_endpoint) - reporter.send_ready_signal() - - self.assertEqual(1, self.post.call_count) - self.assertEqual( - mock.call( - self.test_health_report_url, - data=m_build_report.return_value, - extra_headers=self.test_default_headers), - self.post.call_args) - - def test_send_failure_signal_sends_post_request(self): - with mock.patch.object( - azure_helper.GoalStateHealthReporter, - 'build_report') as m_build_report: - client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - client, self.test_azure_endpoint) - reporter.send_failure_signal( - description=self.provisioning_failure_err_description) - - self.assertEqual(1, self.post.call_count) - self.assertEqual( - mock.call( - self.test_health_report_url, - data=m_build_report.return_value, - extra_headers=self.test_default_headers), - self.post.call_args) - - def test_build_report_for_ready_signal_health_document(self): - health_document = self._get_report_ready_health_document() - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_success_status) - - self.assertEqual(health_document, generated_health_document) - - generated_xroot = ElementTree.fromstring(generated_health_document) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, './GoalStateIncarnation'), - str(self.default_parameters['incarnation'])) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, './Container/ContainerId'), - str(self.default_parameters['container_id'])) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/InstanceId'), - str(self.default_parameters['instance_id'])) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/State'), - escape(self.provisioning_success_status)) - self.assertIsNone( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details')) - self.assertIsNone( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/SubStatus')) - self.assertIsNone( - 
self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/Description') - ) - - def test_build_report_for_failure_signal_health_document(self): - health_document = self._get_report_failure_health_document() - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_not_ready_status, - substatus=self.provisioning_failure_substatus, - description=self.provisioning_failure_err_description) - - self.assertEqual(health_document, generated_health_document) - - generated_xroot = ElementTree.fromstring(generated_health_document) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, './GoalStateIncarnation'), - str(self.default_parameters['incarnation'])) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, './Container/ContainerId'), - self.default_parameters['container_id']) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/InstanceId'), - self.default_parameters['instance_id']) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/State'), - escape(self.provisioning_not_ready_status)) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/' - 'SubStatus'), - escape(self.provisioning_failure_substatus)) - self.assertEqual( - self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/' - 'Description'), - escape(self.provisioning_failure_err_description)) - - def test_send_ready_signal_calls_build_report(self): - with mock.patch.object( - azure_helper.GoalStateHealthReporter, 'build_report' - ) as m_build_report: - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - reporter.send_ready_signal() - - self.assertEqual(1, m_build_report.call_count) - self.assertEqual( - mock.call( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_success_status), - m_build_report.call_args) - - def test_send_failure_signal_calls_build_report(self): - with mock.patch.object( - azure_helper.GoalStateHealthReporter, 'build_report' - ) as m_build_report: - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - reporter.send_failure_signal( - description=self.provisioning_failure_err_description) - - self.assertEqual(1, m_build_report.call_count) - self.assertEqual( - mock.call( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_not_ready_status, - substatus=self.provisioning_failure_substatus, - description=self.provisioning_failure_err_description), - 
m_build_report.call_args)
-
-    def test_build_report_escapes_chars(self):
-        incarnation = 'jd8\'9*&^<\'A>'
-        instance_id = 'Opo>>>jas\'&d;[p&fp\"a<&aa\'sd!@&!)((*<&>'
-        health_substatus = '&as\"d<d<\'^@!5&6<7'
-        health_description = '&&&>!#$\"&&><>&\"sd<67<]>>'
-        # The container_id and health_status values below are illustrative;
-        # any strings containing XML-special characters exercise the
-        # escaping being tested here.
-        container_id = 'cid<&\'">&<'
-        health_status = 'NotReady<&\'">'
-
-        health_detail_subsection = \
-            self._get_formatted_health_detail_subsection_xml_string(
-                health_substatus=escape(health_substatus),
-                health_description=escape(health_description))
-        health_document = self._get_formatted_health_report_xml_string(
-            incarnation=escape(incarnation),
-            container_id=escape(container_id),
-            instance_id=escape(instance_id),
-            health_status=escape(health_status),
-            health_detail_subsection=health_detail_subsection)
-
-        reporter = azure_helper.GoalStateHealthReporter(
-            azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
-            azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
-            self.test_azure_endpoint)
-        generated_health_document = reporter.build_report(
-            incarnation=incarnation,
-            container_id=container_id,
-            instance_id=instance_id,
-            status=health_status,
-            substatus=health_substatus,
-            description=health_description)
-
-        self.assertEqual(health_document, generated_health_document)
-
-    def test_build_report_conforms_to_length_limits(self):
-        reporter = azure_helper.GoalStateHealthReporter(
-            azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()),
-            azure_helper.AzureEndpointHttpClient(mock.MagicMock()),
-            self.test_azure_endpoint)
-        long_err_msg = 'a9&ea8>>>e as1< d\"q2*&(^%\'a=5<' * 100
-        generated_health_document = reporter.build_report(
-            incarnation=self.default_parameters['incarnation'],
-            container_id=self.default_parameters['container_id'],
-            instance_id=self.default_parameters['instance_id'],
-            status=self.provisioning_not_ready_status,
-            substatus=self.provisioning_failure_substatus,
-            description=long_err_msg)
-
-        generated_xroot = ElementTree.fromstring(generated_health_document)
-        generated_health_report_description = self._text_from_xpath_in_xroot(
-            generated_xroot,
-            './Container/RoleInstanceList/Role/Health/Details/Description')
-        self.assertEqual(
-            len(unescape(generated_health_report_description)),
-            HEALTH_REPORT_DESCRIPTION_TRIM_LEN)
-
-    def test_trim_description_then_escape_conforms_to_len_limits_worst_case(
-            self):
-        """When unescaped characters are XML-escaped, the length increases.
-        Char      Escape String
-        <         &lt;
-        >         &gt;
-        "         &quot;
-        '         &apos;
-        &         &amp;
-
-        We (step 1) trim the health report XML's description field,
-        and then (step 2) XML-escape the health report XML's description
-        field.
-
-        The health report XML's description field limit within cloud-init
-        is HEALTH_REPORT_DESCRIPTION_TRIM_LEN.
-
-        The Azure platform's limit on the health report XML's description
-        field is 4096 chars.
-
-        For the worst-case chars ' and ", XML-escaping grows a single char
-        into six (&apos; and &quot;), so a trimmed description of
-        HEALTH_REPORT_DESCRIPTION_TRIM_LEN (512) chars can expand to at most
-        3072 escaped chars.
-
-        Ensure that (1) trimming and then (2) XML-escaping does not blow past
-        the Azure platform's limit for health report XML's description field
-        (4096 chars).
- """ - reporter = azure_helper.GoalStateHealthReporter( - azure_helper.GoalState(mock.MagicMock(), mock.MagicMock()), - azure_helper.AzureEndpointHttpClient(mock.MagicMock()), - self.test_azure_endpoint) - long_err_msg = '\'\"' * 10000 - generated_health_document = reporter.build_report( - incarnation=self.default_parameters['incarnation'], - container_id=self.default_parameters['container_id'], - instance_id=self.default_parameters['instance_id'], - status=self.provisioning_not_ready_status, - substatus=self.provisioning_failure_substatus, - description=long_err_msg) - - generated_xroot = ElementTree.fromstring(generated_health_document) - generated_health_report_description = self._text_from_xpath_in_xroot( - generated_xroot, - './Container/RoleInstanceList/Role/Health/Details/Description') - # The escaped description string should be less than - # the Azure platform limit for the escaped description string. - self.assertLessEqual(len(generated_health_report_description), 4096) - - -class TestWALinuxAgentShim(CiTestCase): - - def setUp(self): - super(TestWALinuxAgentShim, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.AzureEndpointHttpClient = patches.enter_context( - mock.patch.object(azure_helper, 'AzureEndpointHttpClient')) - self.find_endpoint = patches.enter_context( - mock.patch.object(wa_shim, 'find_endpoint')) - self.GoalState = patches.enter_context( - mock.patch.object(azure_helper, 'GoalState')) - self.OpenSSLManager = patches.enter_context( - mock.patch.object(azure_helper, 'OpenSSLManager', autospec=True)) - patches.enter_context( - mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) - - self.test_incarnation = 'TestIncarnation' - self.test_container_id = 'TestContainerId' - self.test_instance_id = 'TestInstanceId' - self.GoalState.return_value.incarnation = self.test_incarnation - self.GoalState.return_value.container_id = self.test_container_id - self.GoalState.return_value.instance_id = self.test_instance_id - - def test_eject_iso_is_called(self): - shim = wa_shim() - with mock.patch.object( - shim, 'eject_iso', autospec=True - ) as m_eject_iso: - shim.register_with_azure_and_fetch_data(iso_dev="/dev/sr0") - m_eject_iso.assert_called_once_with("/dev/sr0") - - def test_http_client_does_not_use_certificate_for_report_ready(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - self.assertEqual( - [mock.call(None)], - self.AzureEndpointHttpClient.call_args_list) - - def test_http_client_does_not_use_certificate_for_report_failure(self): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - self.assertEqual( - [mock.call(None)], - self.AzureEndpointHttpClient.call_args_list) - - def test_correct_url_used_for_goalstate_during_report_ready(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - m_get = self.AzureEndpointHttpClient.return_value.get - self.assertEqual( - [mock.call('http://test_endpoint/machine/?comp=goalstate')], - m_get.call_args_list) - self.assertEqual( - [mock.call( - m_get.return_value.contents, - self.AzureEndpointHttpClient.return_value, - False - )], - self.GoalState.call_args_list) - - def test_correct_url_used_for_goalstate_during_report_failure(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - m_get = self.AzureEndpointHttpClient.return_value.get - self.assertEqual( - 
[mock.call('http://test_endpoint/machine/?comp=goalstate')], - m_get.call_args_list) - self.assertEqual( - [mock.call( - m_get.return_value.contents, - self.AzureEndpointHttpClient.return_value, - False - )], - self.GoalState.call_args_list) - - def test_certificates_used_to_determine_public_keys(self): - # if register_with_azure_and_fetch_data() isn't passed some info about - # the user's public keys, there's no point in even trying to parse the - # certificates - shim = wa_shim() - mypk = [{'fingerprint': 'fp1', 'path': 'path1'}, - {'fingerprint': 'fp3', 'path': 'path3', 'value': ''}] - certs = {'fp1': 'expected-key', - 'fp2': 'should-not-be-found', - 'fp3': 'expected-no-value-key', - } - sslmgr = self.OpenSSLManager.return_value - sslmgr.parse_certificates.return_value = certs - data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) - self.assertEqual( - [mock.call(self.GoalState.return_value.certificates_xml)], - sslmgr.parse_certificates.call_args_list) - self.assertIn('expected-key', data['public-keys']) - self.assertIn('expected-no-value-key', data['public-keys']) - self.assertNotIn('should-not-be-found', data['public-keys']) - - def test_absent_certificates_produces_empty_public_keys(self): - mypk = [{'fingerprint': 'fp1', 'path': 'path1'}] - self.GoalState.return_value.certificates_xml = None - shim = wa_shim() - data = shim.register_with_azure_and_fetch_data(pubkey_info=mypk) - self.assertEqual([], data['public-keys']) - - def test_correct_url_used_for_report_ready(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - expected_url = 'http://test_endpoint/machine?comp=health' - self.assertEqual( - [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], - self.AzureEndpointHttpClient.return_value.post - .call_args_list) - - def test_correct_url_used_for_report_failure(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - expected_url = 'http://test_endpoint/machine?comp=health' - self.assertEqual( - [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], - self.AzureEndpointHttpClient.return_value.post - .call_args_list) - - def test_goal_state_values_used_for_report_ready(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data'] - ) - self.assertIn(self.test_incarnation, posted_document) - self.assertIn(self.test_container_id, posted_document) - self.assertIn(self.test_instance_id, posted_document) - - def test_goal_state_values_used_for_report_failure(self): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data'] - ) - self.assertIn(self.test_incarnation, posted_document) - self.assertIn(self.test_container_id, posted_document) - self.assertIn(self.test_instance_id, posted_document) - - def test_xml_elems_in_report_ready_post(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - health_document = HEALTH_REPORT_XML_TEMPLATE.format( - incarnation=escape(self.test_incarnation), - container_id=escape(self.test_container_id), - instance_id=escape(self.test_instance_id), - health_status=escape('Ready'), - health_detail_subsection='') - posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data']) - 
self.assertEqual(health_document, posted_document) - - def test_xml_elems_in_report_failure_post(self): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - health_document = HEALTH_REPORT_XML_TEMPLATE.format( - incarnation=escape(self.test_incarnation), - container_id=escape(self.test_container_id), - instance_id=escape(self.test_instance_id), - health_status=escape('NotReady'), - health_detail_subsection=HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE - .format( - health_substatus=escape('ProvisioningFailed'), - health_description=escape('TestDesc'))) - posted_document = ( - self.AzureEndpointHttpClient.return_value.post - .call_args[1]['data']) - self.assertEqual(health_document, posted_document) - - @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) - def test_register_with_azure_and_fetch_data_calls_send_ready_signal( - self, m_goal_state_health_reporter): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - self.assertEqual( - 1, - m_goal_state_health_reporter.return_value.send_ready_signal - .call_count) - - @mock.patch.object(azure_helper, 'GoalStateHealthReporter', autospec=True) - def test_register_with_azure_and_report_failure_calls_send_failure_signal( - self, m_goal_state_health_reporter): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - m_goal_state_health_reporter.return_value.send_failure_signal \ - .assert_called_once_with(description='TestDesc') - - def test_register_with_azure_and_report_failure_does_not_need_certificates( - self): - shim = wa_shim() - with mock.patch.object( - shim, '_fetch_goal_state_from_azure', autospec=True - ) as m_fetch_goal_state_from_azure: - shim.register_with_azure_and_report_failure(description='TestDesc') - m_fetch_goal_state_from_azure.assert_called_once_with( - need_certificate=False) - - def test_clean_up_can_be_called_at_any_time(self): - shim = wa_shim() - shim.clean_up() - - def test_openssl_manager_not_instantiated_by_shim_report_status(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - shim.register_with_azure_and_report_failure(description='TestDesc') - shim.clean_up() - self.OpenSSLManager.assert_not_called() - - def test_clean_up_after_report_ready(self): - shim = wa_shim() - shim.register_with_azure_and_fetch_data() - shim.clean_up() - self.OpenSSLManager.return_value.clean_up.assert_not_called() - - def test_clean_up_after_report_failure(self): - shim = wa_shim() - shim.register_with_azure_and_report_failure(description='TestDesc') - shim.clean_up() - self.OpenSSLManager.return_value.clean_up.assert_not_called() - - def test_fetch_goalstate_during_report_ready_raises_exc_on_get_exc(self): - self.AzureEndpointHttpClient.return_value.get \ - .side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_fetch_data) - - def test_fetch_goalstate_during_report_failure_raises_exc_on_get_exc(self): - self.AzureEndpointHttpClient.return_value.get \ - .side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_report_failure, - description='TestDesc') - - def test_fetch_goalstate_during_report_ready_raises_exc_on_parse_exc(self): - self.GoalState.side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_fetch_data) - - def test_fetch_goalstate_during_report_failure_raises_exc_on_parse_exc( - self): - self.GoalState.side_effect = 
SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_report_failure, - description='TestDesc') - - def test_failure_to_send_report_ready_health_doc_bubbles_up(self): - self.AzureEndpointHttpClient.return_value.post \ - .side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_fetch_data) - - def test_failure_to_send_report_failure_health_doc_bubbles_up(self): - self.AzureEndpointHttpClient.return_value.post \ - .side_effect = SentinelException - shim = wa_shim() - self.assertRaises(SentinelException, - shim.register_with_azure_and_report_failure, - description='TestDesc') - - -class TestGetMetadataGoalStateXMLAndReportReadyToFabric(CiTestCase): - - def setUp(self): - super(TestGetMetadataGoalStateXMLAndReportReadyToFabric, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.m_shim = patches.enter_context( - mock.patch.object(azure_helper, 'WALinuxAgentShim')) - - def test_data_from_shim_returned(self): - ret = azure_helper.get_metadata_from_fabric() - self.assertEqual( - self.m_shim.return_value.register_with_azure_and_fetch_data - .return_value, - ret) - - def test_success_calls_clean_up(self): - azure_helper.get_metadata_from_fabric() - self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) - - def test_failure_in_registration_propagates_exc_and_calls_clean_up( - self): - self.m_shim.return_value.register_with_azure_and_fetch_data \ - .side_effect = SentinelException - self.assertRaises(SentinelException, - azure_helper.get_metadata_from_fabric) - self.assertEqual(1, self.m_shim.return_value.clean_up.call_count) - - def test_calls_shim_register_with_azure_and_fetch_data(self): - m_pubkey_info = mock.MagicMock() - azure_helper.get_metadata_from_fabric( - pubkey_info=m_pubkey_info, iso_dev="/dev/sr0") - self.assertEqual( - 1, - self.m_shim.return_value - .register_with_azure_and_fetch_data.call_count) - self.assertEqual( - mock.call(iso_dev="/dev/sr0", pubkey_info=m_pubkey_info), - self.m_shim.return_value - .register_with_azure_and_fetch_data.call_args) - - def test_instantiates_shim_with_kwargs(self): - m_fallback_lease_file = mock.MagicMock() - m_dhcp_options = mock.MagicMock() - azure_helper.get_metadata_from_fabric( - fallback_lease_file=m_fallback_lease_file, - dhcp_opts=m_dhcp_options) - self.assertEqual(1, self.m_shim.call_count) - self.assertEqual( - mock.call( - fallback_lease_file=m_fallback_lease_file, - dhcp_options=m_dhcp_options), - self.m_shim.call_args) - - -class TestGetMetadataGoalStateXMLAndReportFailureToFabric(CiTestCase): - - def setUp(self): - super( - TestGetMetadataGoalStateXMLAndReportFailureToFabric, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.m_shim = patches.enter_context( - mock.patch.object(azure_helper, 'WALinuxAgentShim')) - - def test_success_calls_clean_up(self): - azure_helper.report_failure_to_fabric() - self.assertEqual( - 1, - self.m_shim.return_value.clean_up.call_count) - - def test_failure_in_shim_report_failure_propagates_exc_and_calls_clean_up( - self): - self.m_shim.return_value.register_with_azure_and_report_failure \ - .side_effect = SentinelException - self.assertRaises(SentinelException, - azure_helper.report_failure_to_fabric) - self.assertEqual( - 1, - self.m_shim.return_value.clean_up.call_count) - - def test_report_failure_to_fabric_with_desc_calls_shim_report_failure( - self): - azure_helper.report_failure_to_fabric(description='TestDesc') - 
self.m_shim.return_value.register_with_azure_and_report_failure \ - .assert_called_once_with(description='TestDesc') - - def test_report_failure_to_fabric_with_no_desc_calls_shim_report_failure( - self): - azure_helper.report_failure_to_fabric() - # default err message description should be shown to the user - # if no description is passed in - self.m_shim.return_value.register_with_azure_and_report_failure \ - .assert_called_once_with( - description=azure_helper - .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) - - def test_report_failure_to_fabric_empty_desc_calls_shim_report_failure( - self): - azure_helper.report_failure_to_fabric(description='') - # default err message description should be shown to the user - # if an empty description is passed in - self.m_shim.return_value.register_with_azure_and_report_failure \ - .assert_called_once_with( - description=azure_helper - .DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE) - - def test_instantiates_shim_with_kwargs(self): - m_fallback_lease_file = mock.MagicMock() - m_dhcp_options = mock.MagicMock() - azure_helper.report_failure_to_fabric( - fallback_lease_file=m_fallback_lease_file, - dhcp_opts=m_dhcp_options) - self.m_shim.assert_called_once_with( - fallback_lease_file=m_fallback_lease_file, - dhcp_options=m_dhcp_options) - - -class TestExtractIpAddressFromNetworkd(CiTestCase): - - azure_lease = dedent("""\ - # This is private data. Do not parse. - ADDRESS=10.132.0.5 - NETMASK=255.255.255.255 - ROUTER=10.132.0.1 - SERVER_ADDRESS=169.254.169.254 - NEXT_SERVER=10.132.0.1 - MTU=1460 - T1=43200 - T2=75600 - LIFETIME=86400 - DNS=169.254.169.254 - NTP=169.254.169.254 - DOMAINNAME=c.ubuntu-foundations.internal - DOMAIN_SEARCH_LIST=c.ubuntu-foundations.internal google.internal - HOSTNAME=tribaal-test-171002-1349.c.ubuntu-foundations.internal - ROUTES=10.132.0.1/32,0.0.0.0 0.0.0.0/0,10.132.0.1 - CLIENTID=ff405663a200020000ab11332859494d7a8b4c - OPTION_245=624c3620 - """) - - def setUp(self): - super(TestExtractIpAddressFromNetworkd, self).setUp() - self.lease_d = self.tmp_dir() - - def test_no_valid_leases_is_none(self): - """No valid leases should return None.""" - self.assertIsNone( - wa_shim._networkd_get_value_from_leases(self.lease_d)) - - def test_option_245_is_found_in_single(self): - """A single valid lease with 245 option should return it.""" - populate_dir(self.lease_d, {'9': self.azure_lease}) - self.assertEqual( - '624c3620', wa_shim._networkd_get_value_from_leases(self.lease_d)) - - def test_option_245_not_found_returns_None(self): - """A valid lease, but no option 245 should return None.""" - populate_dir( - self.lease_d, - {'9': self.azure_lease.replace("OPTION_245", "OPTION_999")}) - self.assertIsNone( - wa_shim._networkd_get_value_from_leases(self.lease_d)) - - def test_multiple_returns_first(self): - """Somewhat arbitrarily return the first address when multiple. - - Most important at the moment is that this is consistent behavior - rather than changing randomly as in order of a dictionary.""" - myval = "624c3601" - populate_dir( - self.lease_d, - {'9': self.azure_lease, - '2': self.azure_lease.replace("624c3620", myval)}) - self.assertEqual( - myval, wa_shim._networkd_get_value_from_leases(self.lease_d)) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py deleted file mode 100644 index 7aa3b1d1..00000000 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ /dev/null @@ -1,137 +0,0 @@ -# This file is part of cloud-init. 
See LICENSE file for license information. - -import copy - -from cloudinit.cs_utils import Cepko -from cloudinit import distros -from cloudinit import helpers -from cloudinit import sources -from cloudinit.sources import DataSourceCloudSigma - -from cloudinit.tests import helpers as test_helpers - -SERVER_CONTEXT = { - "cpu": 1000, - "cpus_instead_of_cores": False, - "global_context": {"some_global_key": "some_global_val"}, - "mem": 1073741824, - "meta": { - "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe", - "cloudinit-user-data": "#cloud-config\n\n...", - }, - "name": "test_server", - "requirements": [], - "smp": 1, - "tags": ["much server", "very performance"], - "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890", - "vnc_password": "9e84d6cb49e46379", - "vendor_data": { - "location": "zrh", - "cloudinit": "#cloud-config\n\n...", - } -} - -DS_PATH = 'cloudinit.sources.DataSourceCloudSigma.DataSourceCloudSigma' - - -class CepkoMock(Cepko): - def __init__(self, mocked_context): - self.result = mocked_context - - def all(self): - return self - - -class DataSourceCloudSigmaTest(test_helpers.CiTestCase): - def setUp(self): - super(DataSourceCloudSigmaTest, self).setUp() - self.paths = helpers.Paths({'run_dir': self.tmp_dir()}) - self.add_patch(DS_PATH + '.is_running_in_cloudsigma', - "m_is_container", return_value=True) - - distro_cls = distros.fetch("ubuntu") - distro = distro_cls("ubuntu", cfg={}, paths=self.paths) - self.datasource = DataSourceCloudSigma.DataSourceCloudSigma( - sys_cfg={}, distro=distro, paths=self.paths) - self.datasource.cepko = CepkoMock(SERVER_CONTEXT) - - def test_get_hostname(self): - self.datasource.get_data() - self.assertEqual("test_server", self.datasource.get_hostname()) - self.datasource.metadata['name'] = '' - self.assertEqual("65b2fb23", self.datasource.get_hostname()) - utf8_hostname = b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82'.decode('utf-8') - self.datasource.metadata['name'] = utf8_hostname - self.assertEqual("65b2fb23", self.datasource.get_hostname()) - - def test_get_public_ssh_keys(self): - self.datasource.get_data() - self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']], - self.datasource.get_public_ssh_keys()) - - def test_get_instance_id(self): - self.datasource.get_data() - self.assertEqual(SERVER_CONTEXT['uuid'], - self.datasource.get_instance_id()) - - def test_platform(self): - """All platform-related attributes are set.""" - self.datasource.get_data() - self.assertEqual(self.datasource.cloud_name, 'cloudsigma') - self.assertEqual(self.datasource.platform_type, 'cloudsigma') - self.assertEqual(self.datasource.subplatform, 'cepko (/dev/ttyS1)') - - def test_metadata(self): - self.datasource.get_data() - self.assertEqual(self.datasource.metadata, SERVER_CONTEXT) - - def test_user_data(self): - self.datasource.get_data() - self.assertEqual(self.datasource.userdata_raw, - SERVER_CONTEXT['meta']['cloudinit-user-data']) - - def test_encoded_user_data(self): - encoded_context = copy.deepcopy(SERVER_CONTEXT) - encoded_context['meta']['base64_fields'] = 'cloudinit-user-data' - encoded_context['meta']['cloudinit-user-data'] = 'aGkgd29ybGQK' - self.datasource.cepko = CepkoMock(encoded_context) - self.datasource.get_data() - - self.assertEqual(self.datasource.userdata_raw, b'hi world\n') - - def test_vendor_data(self): - self.datasource.get_data() - self.assertEqual(self.datasource.vendordata_raw, - SERVER_CONTEXT['vendor_data']['cloudinit']) - - def test_lack_of_vendor_data(self): - stripped_context = copy.deepcopy(SERVER_CONTEXT) - del 
stripped_context["vendor_data"] - self.datasource.cepko = CepkoMock(stripped_context) - self.datasource.get_data() - - self.assertIsNone(self.datasource.vendordata_raw) - - def test_lack_of_cloudinit_key_in_vendor_data(self): - stripped_context = copy.deepcopy(SERVER_CONTEXT) - del stripped_context["vendor_data"]["cloudinit"] - self.datasource.cepko = CepkoMock(stripped_context) - self.datasource.get_data() - - self.assertIsNone(self.datasource.vendordata_raw) - - -class DsLoads(test_helpers.TestCase): - def test_get_datasource_list_returns_in_local(self): - deps = (sources.DEP_FILESYSTEM,) - ds_list = DataSourceCloudSigma.get_datasource_list(deps) - self.assertEqual(ds_list, - [DataSourceCloudSigma.DataSourceCloudSigma]) - - def test_list_sources_finds_ds(self): - found = sources.list_sources( - ['CloudSigma'], (sources.DEP_FILESYSTEM,), ['cloudinit.sources']) - self.assertEqual([DataSourceCloudSigma.DataSourceCloudSigma], - found) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py deleted file mode 100644 index e68168f2..00000000 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ /dev/null @@ -1,186 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit import helpers -from cloudinit import util -from cloudinit.sources.DataSourceCloudStack import ( - DataSourceCloudStack, get_latest_lease) - -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock - -import os -import time - -MOD_PATH = 'cloudinit.sources.DataSourceCloudStack' -DS_PATH = MOD_PATH + '.DataSourceCloudStack' - - -class TestCloudStackPasswordFetching(CiTestCase): - - def setUp(self): - super(TestCloudStackPasswordFetching, self).setUp() - self.patches = ExitStack() - self.addCleanup(self.patches.close) - mod_name = MOD_PATH - self.patches.enter_context(mock.patch('{0}.ec2'.format(mod_name))) - self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) - default_gw = "192.201.20.0" - get_latest_lease = mock.MagicMock(return_value=None) - self.patches.enter_context(mock.patch( - mod_name + '.get_latest_lease', get_latest_lease)) - - get_default_gw = mock.MagicMock(return_value=default_gw) - self.patches.enter_context(mock.patch( - mod_name + '.get_default_gateway', get_default_gw)) - - get_networkd_server_address = mock.MagicMock(return_value=None) - self.patches.enter_context(mock.patch( - mod_name + '.dhcp.networkd_get_option_from_leases', - get_networkd_server_address)) - self.tmp = self.tmp_dir() - - def _set_password_server_response(self, response_string): - subp = mock.MagicMock(return_value=(response_string, '')) - self.patches.enter_context( - mock.patch('cloudinit.sources.DataSourceCloudStack.subp.subp', - subp)) - return subp - - def test_empty_password_doesnt_create_config(self): - self._set_password_server_response('') - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - ds.get_data() - self.assertEqual({}, ds.get_config_obj()) - - def test_saved_password_doesnt_create_config(self): - self._set_password_server_response('saved_password') - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - ds.get_data() - self.assertEqual({}, ds.get_config_obj()) - - @mock.patch(DS_PATH + '.wait_for_metadata_service') - def test_password_sets_password(self, m_wait): - m_wait.return_value = True - password = 'SekritSquirrel' - self._set_password_server_response(password) - ds = DataSourceCloudStack( - {}, None, 
helpers.Paths({'run_dir': self.tmp})) - ds.get_data() - self.assertEqual(password, ds.get_config_obj()['password']) - - @mock.patch(DS_PATH + '.wait_for_metadata_service') - def test_bad_request_doesnt_stop_ds_from_working(self, m_wait): - m_wait.return_value = True - self._set_password_server_response('bad_request') - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - self.assertTrue(ds.get_data()) - - def assertRequestTypesSent(self, subp, expected_request_types): - request_types = [] - for call in subp.call_args_list: - args = call[0][0] - for arg in args: - if arg.startswith('DomU_Request'): - request_types.append(arg.split()[1]) - self.assertEqual(expected_request_types, request_types) - - @mock.patch(DS_PATH + '.wait_for_metadata_service') - def test_valid_response_means_password_marked_as_saved(self, m_wait): - m_wait.return_value = True - password = 'SekritSquirrel' - subp = self._set_password_server_response(password) - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - ds.get_data() - self.assertRequestTypesSent(subp, - ['send_my_password', 'saved_password']) - - def _check_password_not_saved_for(self, response_string): - subp = self._set_password_server_response(response_string) - ds = DataSourceCloudStack( - {}, None, helpers.Paths({'run_dir': self.tmp})) - with mock.patch(DS_PATH + '.wait_for_metadata_service') as m_wait: - m_wait.return_value = True - ds.get_data() - self.assertRequestTypesSent(subp, ['send_my_password']) - - def test_password_not_saved_if_empty(self): - self._check_password_not_saved_for('') - - def test_password_not_saved_if_already_saved(self): - self._check_password_not_saved_for('saved_password') - - def test_password_not_saved_if_bad_request(self): - self._check_password_not_saved_for('bad_request') - - -class TestGetLatestLease(CiTestCase): - - def _populate_dir_list(self, bdir, files): - """populate_dir_list([(name, data), (name, data)]) - - writes files to bdir, and updates timestamps to ensure - that their mtime increases with each file.""" - - start = int(time.time()) - for num, fname in enumerate(reversed(files)): - fpath = os.path.sep.join((bdir, fname)) - util.write_file(fpath, fname.encode()) - os.utime(fpath, (start - num, start - num)) - - def _pop_and_test(self, files, expected): - lease_d = self.tmp_dir() - self._populate_dir_list(lease_d, files) - self.assertEqual(self.tmp_path(expected, lease_d), - get_latest_lease(lease_d)) - - def test_skips_dhcpv6_files(self): - """files started with dhclient6 should be skipped.""" - expected = "dhclient.lease" - self._pop_and_test([expected, "dhclient6.lease"], expected) - - def test_selects_dhclient_dot_files(self): - """files named dhclient.lease or dhclient.leases should be used. - - Ubuntu names files dhclient.eth0.leases dhclient6.leases and - sometimes dhclient.leases.""" - self._pop_and_test(["dhclient.lease"], "dhclient.lease") - self._pop_and_test(["dhclient.leases"], "dhclient.leases") - - def test_selects_dhclient_dash_files(self): - """files named dhclient-lease or dhclient-leases should be used. - - Redhat/Centos names files with dhclient--eth0.lease (centos 7) or - dhclient-eth0.leases (centos 6). 
- """ - self._pop_and_test(["dhclient-eth0.lease"], "dhclient-eth0.lease") - self._pop_and_test(["dhclient--eth0.lease"], "dhclient--eth0.lease") - - def test_ignores_by_extension(self): - """only .lease or .leases file should be considered.""" - - self._pop_and_test(["dhclient.lease", "dhclient.lease.bk", - "dhclient.lease-old", "dhclient.leaselease"], - "dhclient.lease") - - def test_selects_newest_matching(self): - """If multiple files match, the newest written should be used.""" - lease_d = self.tmp_dir() - valid_1 = "dhclient.leases" - valid_2 = "dhclient.lease" - valid_1_path = self.tmp_path(valid_1, lease_d) - valid_2_path = self.tmp_path(valid_2, lease_d) - - self._populate_dir_list(lease_d, [valid_1, valid_2]) - self.assertEqual(valid_2_path, get_latest_lease(lease_d)) - - # now update mtime on valid_2 to be older than valid_1 and re-check. - mtime = int(os.path.getmtime(valid_1_path)) - 1 - os.utime(valid_2_path, (mtime, mtime)) - - self.assertEqual(valid_1_path, get_latest_lease(lease_d)) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py deleted file mode 100644 index 9089e5de..00000000 --- a/tests/unittests/test_datasource/test_common.py +++ /dev/null @@ -1,121 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit import settings -from cloudinit import sources -from cloudinit import type_utils -from cloudinit.sources import ( - DataSource, - DataSourceAliYun as AliYun, - DataSourceAltCloud as AltCloud, - DataSourceAzure as Azure, - DataSourceBigstep as Bigstep, - DataSourceCloudSigma as CloudSigma, - DataSourceCloudStack as CloudStack, - DataSourceConfigDrive as ConfigDrive, - DataSourceDigitalOcean as DigitalOcean, - DataSourceEc2 as Ec2, - DataSourceExoscale as Exoscale, - DataSourceGCE as GCE, - DataSourceHetzner as Hetzner, - DataSourceIBMCloud as IBMCloud, - DataSourceLXD as LXD, - DataSourceMAAS as MAAS, - DataSourceNoCloud as NoCloud, - DataSourceOpenNebula as OpenNebula, - DataSourceOpenStack as OpenStack, - DataSourceOracle as Oracle, - DataSourceOVF as OVF, - DataSourceRbxCloud as RbxCloud, - DataSourceScaleway as Scaleway, - DataSourceSmartOS as SmartOS, - DataSourceUpCloud as UpCloud, - DataSourceVultr as Vultr, - DataSourceVMware as VMware, -) -from cloudinit.sources import DataSourceNone as DSNone - -from cloudinit.tests import helpers as test_helpers - -DEFAULT_LOCAL = [ - Azure.DataSourceAzure, - CloudSigma.DataSourceCloudSigma, - ConfigDrive.DataSourceConfigDrive, - DigitalOcean.DataSourceDigitalOcean, - GCE.DataSourceGCELocal, - Hetzner.DataSourceHetzner, - IBMCloud.DataSourceIBMCloud, - LXD.DataSourceLXD, - NoCloud.DataSourceNoCloud, - OpenNebula.DataSourceOpenNebula, - Oracle.DataSourceOracle, - OVF.DataSourceOVF, - SmartOS.DataSourceSmartOS, - Vultr.DataSourceVultr, - Ec2.DataSourceEc2Local, - OpenStack.DataSourceOpenStackLocal, - RbxCloud.DataSourceRbxCloud, - Scaleway.DataSourceScaleway, - UpCloud.DataSourceUpCloudLocal, - VMware.DataSourceVMware, -] - -DEFAULT_NETWORK = [ - AliYun.DataSourceAliYun, - AltCloud.DataSourceAltCloud, - Bigstep.DataSourceBigstep, - CloudStack.DataSourceCloudStack, - DSNone.DataSourceNone, - Ec2.DataSourceEc2, - Exoscale.DataSourceExoscale, - GCE.DataSourceGCE, - MAAS.DataSourceMAAS, - NoCloud.DataSourceNoCloudNet, - OpenStack.DataSourceOpenStack, - OVF.DataSourceOVFNet, - UpCloud.DataSourceUpCloud, - VMware.DataSourceVMware, -] - - -class ExpectedDataSources(test_helpers.TestCase): - 
builtin_list = settings.CFG_BUILTIN['datasource_list'] - deps_local = [sources.DEP_FILESYSTEM] - deps_network = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] - pkg_list = [type_utils.obj_name(sources)] - - def test_expected_default_local_sources_found(self): - found = sources.list_sources( - self.builtin_list, self.deps_local, self.pkg_list) - self.assertEqual(set(DEFAULT_LOCAL), set(found)) - - def test_expected_default_network_sources_found(self): - found = sources.list_sources( - self.builtin_list, self.deps_network, self.pkg_list) - self.assertEqual(set(DEFAULT_NETWORK), set(found)) - - def test_expected_nondefault_network_sources_found(self): - found = sources.list_sources( - ['AliYun'], self.deps_network, self.pkg_list) - self.assertEqual(set([AliYun.DataSourceAliYun]), set(found)) - - -class TestDataSourceInvariants(test_helpers.TestCase): - def test_data_sources_have_valid_network_config_sources(self): - for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: - for cfg_src in ds.network_config_sources: - fail_msg = ('{} has an invalid network_config_sources entry:' - ' {}'.format(str(ds), cfg_src)) - self.assertTrue(hasattr(sources.NetworkConfigSource, cfg_src), - fail_msg) - - def test_expected_dsname_defined(self): - for ds in DEFAULT_LOCAL + DEFAULT_NETWORK: - fail_msg = ( - '{} has an invalid / missing dsname property: {}'.format( - str(ds), str(ds.dsname) - ) - ) - self.assertNotEqual(ds.dsname, DataSource.dsname, fail_msg) - self.assertIsNotNone(ds.dsname) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py deleted file mode 100644 index be13165c..00000000 --- a/tests/unittests/test_datasource/test_configdrive.py +++ /dev/null @@ -1,844 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -from copy import copy, deepcopy -import json -import os - -from cloudinit import helpers -from cloudinit.net import eni -from cloudinit.net import network_state -from cloudinit import settings -from cloudinit.sources import DataSourceConfigDrive as ds -from cloudinit.sources.helpers import openstack -from cloudinit import util - -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, populate_dir - - -PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' -EC2_META = { - 'ami-id': 'ami-00000001', - 'ami-launch-index': 0, - 'ami-manifest-path': 'FIXME', - 'block-device-mapping': { - 'ami': 'sda1', - 'ephemeral0': 'sda2', - 'root': '/dev/sda1', - 'swap': 'sda3'}, - 'hostname': 'sm-foo-test.novalocal', - 'instance-action': 'none', - 'instance-id': 'i-00000001', - 'instance-type': 'm1.tiny', - 'local-hostname': 'sm-foo-test.novalocal', - 'local-ipv4': None, - 'placement': {'availability-zone': 'nova'}, - 'public-hostname': 'sm-foo-test.novalocal', - 'public-ipv4': '', - 'public-keys': {'0': {'openssh-key': PUBKEY}}, - 'reservation-id': 'r-iru5qm4m', - 'security-groups': ['default'] -} -USER_DATA = b'#!/bin/sh\necho This is user data\n' -OSTACK_META = { - 'availability_zone': 'nova', - 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, - {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], - 'hostname': 'sm-foo-test.novalocal', - 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, - 'name': 'sm-foo-test', - 'public_keys': {'mykey': PUBKEY}, - 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} - -CONTENT_0 = b'This is contents of /etc/foo.cfg\n' -CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' -NETWORK_DATA = { - 'services': [ - {'type': 'dns', 'address': '199.204.44.24'}, - {'type': 'dns', 'address': '199.204.47.54'} - ], - 'links': [ - {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', - 'ethernet_mac_address': 'fa:16:3e:69:b0:58', - 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, - {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', - 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', - 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, - {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc', - 'ethernet_mac_address': 'fa:16:3e:05:30:fe', - 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04', 'name': 'nic0'} - ], - 'networks': [ - {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp', - 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', - 'id': 'network0'}, - {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp', - 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', - 'id': 'network1'}, - {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp', - 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5', - 'id': 'network2'} - ] -} - -NETWORK_DATA_2 = { - "services": [ - {"type": "dns", "address": "1.1.1.191"}, - {"type": "dns", "address": "1.1.1.4"}], - "networks": [ - {"network_id": "d94bbe94-7abc-48d4-9c82-4628ea26164a", "type": "ipv4", - "netmask": "255.255.255.248", "link": "eth0", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "2.2.2.9"}], - "ip_address": "2.2.2.10", "id": "network0-ipv4"}, - {"network_id": "ca447c83-6409-499b-aaef-6ad1ae995348", "type": "ipv4", - "netmask": "255.255.255.224", "link": "eth1", - "routes": [], "ip_address": "3.3.3.24", "id": "network1-ipv4"}], - "links": [ - {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": 1500, - "type": "vif", "id": "eth0", "vif_id": "vif-foo1"}, - {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": 1500, - "type": "vif", "id": "eth1", "vif_id": "vif-foo2"}] -} - -# This network 
data ha 'tap' or null type for a link. -NETWORK_DATA_3 = { - "services": [{"type": "dns", "address": "172.16.36.11"}, - {"type": "dns", "address": "172.16.36.12"}], - "networks": [ - {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", - "type": "ipv4", "netmask": "255.255.255.128", - "link": "tap77a0dc5b-72", "ip_address": "172.17.48.18", - "id": "network0", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "172.17.48.1"}]}, - {"network_id": "7c41450c-ba44-401a-9ab1-1604bb2da51e", - "type": "ipv6", "netmask": "ffff:ffff:ffff:ffff::", - "link": "tap77a0dc5b-72", - "ip_address": "fdb8:52d0:9d14:0:f816:3eff:fe9f:70d", - "id": "network1", - "routes": [{"netmask": "::", "network": "::", - "gateway": "fdb8:52d0:9d14::1"}]}, - {"network_id": "1f53cb0e-72d3-47c7-94b9-ff4397c5fe54", - "type": "ipv4", "netmask": "255.255.255.128", - "link": "tap7d6b7bec-93", "ip_address": "172.16.48.13", - "id": "network2", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "172.16.48.1"}, - {"netmask": "255.255.0.0", "network": "172.16.0.0", - "gateway": "172.16.48.1"}]}], - "links": [ - {"ethernet_mac_address": "fa:16:3e:dd:50:9a", "mtu": None, - "type": "tap", "id": "tap77a0dc5b-72", - "vif_id": "77a0dc5b-720e-41b7-bfa7-1b2ff62e0d48"}, - {"ethernet_mac_address": "fa:16:3e:a8:14:69", "mtu": None, - "type": None, "id": "tap7d6b7bec-93", - "vif_id": "7d6b7bec-93e6-4c03-869a-ddc5014892d5"} - ] -} - -BOND_MAC = "fa:16:3e:b3:72:36" -NETWORK_DATA_BOND = { - "services": [ - {"type": "dns", "address": "1.1.1.191"}, - {"type": "dns", "address": "1.1.1.4"}, - ], - "networks": [ - {"id": "network2-ipv4", "ip_address": "2.2.2.13", - "link": "vlan2", "netmask": "255.255.255.248", - "network_id": "4daf5ce8-38cf-4240-9f1a-04e86d7c6117", - "type": "ipv4", - "routes": [{"netmask": "0.0.0.0", "network": "0.0.0.0", - "gateway": "2.2.2.9"}]}, - {"id": "network3-ipv4", "ip_address": "10.0.1.5", - "link": "vlan3", "netmask": "255.255.255.248", - "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", - "type": "ipv4", - "routes": [{"netmask": "255.255.255.255", - "network": "192.168.1.0", "gateway": "10.0.1.1"}]} - ], - "links": [ - {"ethernet_mac_address": "0c:c4:7a:34:6e:3c", - "id": "eth0", "mtu": 1500, "type": "phy"}, - {"ethernet_mac_address": "0c:c4:7a:34:6e:3d", - "id": "eth1", "mtu": 1500, "type": "phy"}, - {"bond_links": ["eth0", "eth1"], - "bond_miimon": 100, "bond_mode": "4", - "bond_xmit_hash_policy": "layer3+4", - "ethernet_mac_address": BOND_MAC, - "id": "bond0", "type": "bond"}, - {"ethernet_mac_address": "fa:16:3e:b3:72:30", - "id": "vlan2", "type": "vlan", "vlan_id": 602, - "vlan_link": "bond0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, - {"ethernet_mac_address": "fa:16:3e:66:ab:a6", - "id": "vlan3", "type": "vlan", "vlan_id": 612, "vlan_link": "bond0", - "vlan_mac_address": "fa:16:3e:66:ab:a6"} - ] -} - -NETWORK_DATA_VLAN = { - "services": [{"type": "dns", "address": "1.1.1.191"}], - "networks": [ - {"id": "network1-ipv4", "ip_address": "10.0.1.5", - "link": "vlan1", "netmask": "255.255.255.248", - "network_id": "a9e2f47c-3c43-4782-94d0-e1eeef1c8c9d", - "type": "ipv4", - "routes": [{"netmask": "255.255.255.255", - "network": "192.168.1.0", "gateway": "10.0.1.1"}]} - ], - "links": [ - {"ethernet_mac_address": "fa:16:3e:69:b0:58", - "id": "eth0", "mtu": 1500, "type": "phy"}, - {"ethernet_mac_address": "fa:16:3e:b3:72:30", - "id": "vlan1", "type": "vlan", "vlan_id": 602, - "vlan_link": "eth0", "vlan_mac_address": "fa:16:3e:b3:72:30"}, - ] -} - -KNOWN_MACS = { - 
'fa:16:3e:69:b0:58': 'enp0s1', - 'fa:16:3e:d4:57:ad': 'enp0s2', - 'fa:16:3e:dd:50:9a': 'foo1', - 'fa:16:3e:a8:14:69': 'foo2', - 'fa:16:3e:ed:9a:59': 'foo3', - '0c:c4:7a:34:6e:3d': 'oeth1', - '0c:c4:7a:34:6e:3c': 'oeth0', -} - -CFG_DRIVE_FILES_V2 = { - 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META), - 'ec2/2009-04-04/user-data': USER_DATA, - 'ec2/latest/meta-data.json': json.dumps(EC2_META), - 'ec2/latest/user-data': USER_DATA, - 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META), - 'openstack/2012-08-10/user_data': USER_DATA, - 'openstack/content/0000': CONTENT_0, - 'openstack/content/0001': CONTENT_1, - 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), - 'openstack/latest/user_data': USER_DATA, - 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA), - 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META), - 'openstack/2015-10-15/user_data': USER_DATA, - 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} - -M_PATH = "cloudinit.sources.DataSourceConfigDrive." - - -class TestConfigDriveDataSource(CiTestCase): - - def setUp(self): - super(TestConfigDriveDataSource, self).setUp() - self.add_patch( - M_PATH + "util.find_devs_with", - "m_find_devs_with", return_value=[]) - self.tmp = self.tmp_dir() - - def test_ec2_metadata(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - found = ds.read_config_drive(self.tmp) - self.assertTrue('ec2-metadata' in found) - ec2_md = found['ec2-metadata'] - self.assertEqual(EC2_META, ec2_md) - - def test_dev_os_remap(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - found = ds.read_config_drive(self.tmp) - cfg_ds.metadata = found['metadata'] - name_tests = { - 'ami': '/dev/vda1', - 'root': '/dev/vda1', - 'ephemeral0': '/dev/vda2', - 'swap': '/dev/vda3', - } - for name, dev_name in name_tests.items(): - with ExitStack() as mocks: - provided_name = dev_name[len('/dev/'):] - provided_name = "s" + provided_name[1:] - find_mock = mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - return_value=[provided_name])) - # We want os.path.exists() to return False on its first call, - # and True on its second call. We use a handy generator as - # the mock side effect for this. The mocked function returns - # what the side effect returns. 
- - def exists_side_effect(): - yield False - yield True - exists_mock = mocks.enter_context( - mock.patch.object(os.path, 'exists', - side_effect=exists_side_effect())) - self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) - - find_mock.assert_called_once_with(mock.ANY) - self.assertEqual(exists_mock.call_count, 2) - - def test_dev_os_map(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - found = ds.read_config_drive(self.tmp) - os_md = found['metadata'] - cfg_ds.metadata = os_md - name_tests = { - 'ami': '/dev/vda1', - 'root': '/dev/vda1', - 'ephemeral0': '/dev/vda2', - 'swap': '/dev/vda3', - } - for name, dev_name in name_tests.items(): - with ExitStack() as mocks: - find_mock = mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - return_value=[dev_name])) - exists_mock = mocks.enter_context( - mock.patch.object(os.path, 'exists', - return_value=True)) - self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) - - find_mock.assert_called_once_with(mock.ANY) - exists_mock.assert_called_once_with(mock.ANY) - - def test_dev_ec2_remap(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - found = ds.read_config_drive(self.tmp) - ec2_md = found['ec2-metadata'] - os_md = found['metadata'] - cfg_ds.ec2_metadata = ec2_md - cfg_ds.metadata = os_md - name_tests = { - 'ami': '/dev/vda1', - 'root': '/dev/vda1', - 'ephemeral0': '/dev/vda2', - 'swap': '/dev/vda3', - None: None, - 'bob': None, - 'root2k': None, - } - for name, dev_name in name_tests.items(): - # We want os.path.exists() to return False on its first call, - # and True on its second call. We use a handy generator as - # the mock side effect for this. The mocked function returns - # what the side effect returns. - def exists_side_effect(): - yield False - yield True - with mock.patch.object(os.path, 'exists', - side_effect=exists_side_effect()): - self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) - # We don't assert the call count for os.path.exists() because - # not all of the entries in name_tests results in two calls to - # that function. Specifically, 'root2k' doesn't seem to call - # it at all. 
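The exists_side_effect() generator used in the remapping tests above leans on a unittest.mock behavior: when side_effect is an iterable, each call to the mock returns the next value from it. A minimal standalone sketch of that pattern, assuming nothing beyond the standard library (the path /dev/vda1 and the name m_exists are illustrative only):

import os.path
from unittest import mock


def exists_side_effect():
    # First lookup misses, second succeeds, mirroring the False-then-True
    # sequence the device-remapping tests rely on.
    yield False
    yield True


with mock.patch('os.path.exists', side_effect=exists_side_effect()) as m_exists:
    assert os.path.exists('/dev/vda1') is False  # first call consumes False
    assert os.path.exists('/dev/vda1') is True   # second call consumes True
assert m_exists.call_count == 2

Once the generator is exhausted, a further call would raise StopIteration, which is why the tests that use this trick call the patched function exactly twice.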
- - def test_dev_ec2_map(self): - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - found = ds.read_config_drive(self.tmp) - ec2_md = found['ec2-metadata'] - os_md = found['metadata'] - cfg_ds.ec2_metadata = ec2_md - cfg_ds.metadata = os_md - name_tests = { - 'ami': '/dev/sda1', - 'root': '/dev/sda1', - 'ephemeral0': '/dev/sda2', - 'swap': '/dev/sda3', - None: None, - 'bob': None, - 'root2k': None, - } - for name, dev_name in name_tests.items(): - with mock.patch.object(os.path, 'exists', return_value=True): - self.assertEqual(dev_name, cfg_ds.device_name_to_device(name)) - - def test_dir_valid(self): - """Verify a dir is read as such.""" - - populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - - found = ds.read_config_drive(self.tmp) - - expected_md = copy(OSTACK_META) - expected_md['instance-id'] = expected_md['uuid'] - expected_md['local-hostname'] = expected_md['hostname'] - - self.assertEqual(USER_DATA, found['userdata']) - self.assertEqual(expected_md, found['metadata']) - self.assertEqual(NETWORK_DATA, found['networkdata']) - self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0) - self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1) - - def test_seed_dir_valid_extra(self): - """Verify extra files do not affect datasource validity.""" - - data = copy(CFG_DRIVE_FILES_V2) - data["myfoofile.txt"] = "myfoocontent" - data["openstack/latest/random-file.txt"] = "random-content" - - populate_dir(self.tmp, data) - - found = ds.read_config_drive(self.tmp) - - expected_md = copy(OSTACK_META) - expected_md['instance-id'] = expected_md['uuid'] - expected_md['local-hostname'] = expected_md['hostname'] - - self.assertEqual(expected_md, found['metadata']) - - def test_seed_dir_bad_json_metadata(self): - """Verify that bad json in metadata raises BrokenConfigDriveDir.""" - data = copy(CFG_DRIVE_FILES_V2) - - data["openstack/2012-08-10/meta_data.json"] = "non-json garbage {}" - data["openstack/2015-10-15/meta_data.json"] = "non-json garbage {}" - data["openstack/latest/meta_data.json"] = "non-json garbage {}" - - populate_dir(self.tmp, data) - - self.assertRaises(openstack.BrokenMetadata, - ds.read_config_drive, self.tmp) - - def test_seed_dir_no_configdrive(self): - """Verify that no metadata raises NonConfigDriveDir.""" - - my_d = os.path.join(self.tmp, "non-configdrive") - data = copy(CFG_DRIVE_FILES_V2) - data["myfoofile.txt"] = "myfoocontent" - data["openstack/latest/random-file.txt"] = "random-content" - data["content/foo"] = "foocontent" - - self.assertRaises(openstack.NonReadable, - ds.read_config_drive, my_d) - - def test_seed_dir_missing(self): - """Verify that missing seed_dir raises NonConfigDriveDir.""" - my_d = os.path.join(self.tmp, "nonexistantdirectory") - self.assertRaises(openstack.NonReadable, - ds.read_config_drive, my_d) - - def test_find_candidates(self): - devs_with_answers = {} - - def my_devs_with(*args, **kwargs): - criteria = args[0] if len(args) else kwargs.pop('criteria', None) - return devs_with_answers.get(criteria, []) - - def my_is_partition(dev): - return dev[-1] in "0123456789" and not dev.startswith("sr") - - try: - orig_find_devs_with = util.find_devs_with - util.find_devs_with = my_devs_with - - orig_is_partition = util.is_partition - util.is_partition = my_is_partition - - devs_with_answers = {"TYPE=vfat": [], - "TYPE=iso9660": ["/dev/vdb"], - "LABEL=config-2": ["/dev/vdb"]} - self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) - - # add a vfat item - # zdd reverse 
sorts after vdb, but config-2 label is preferred - devs_with_answers['TYPE=vfat'] = ["/dev/zdd"] - self.assertEqual(["/dev/vdb", "/dev/zdd"], - ds.find_candidate_devs()) - - # verify that partitions are considered, that have correct label. - devs_with_answers = {"TYPE=vfat": ["/dev/sda1"], - "TYPE=iso9660": [], - "LABEL=config-2": ["/dev/vdb3"]} - self.assertEqual(["/dev/vdb3"], - ds.find_candidate_devs()) - - # Verify that uppercase labels are also found. - devs_with_answers = {"TYPE=vfat": [], - "TYPE=iso9660": ["/dev/vdb"], - "LABEL=CONFIG-2": ["/dev/vdb"]} - self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) - - finally: - util.find_devs_with = orig_find_devs_with - util.is_partition = orig_is_partition - - @mock.patch(M_PATH + 'on_first_boot') - def test_pubkeys_v2(self, on_first_boot): - """Verify that public-keys work in config-drive-v2.""" - myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) - self.assertEqual(myds.get_public_ssh_keys(), - [OSTACK_META['public_keys']['mykey']]) - self.assertEqual('configdrive', myds.cloud_name) - self.assertEqual('openstack', myds.platform) - self.assertEqual('seed-dir (%s/seed)' % self.tmp, myds.subplatform) - - def test_subplatform_config_drive_when_starts_with_dev(self): - """subplatform reports config-drive when source starts with /dev/.""" - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) - with mock.patch(M_PATH + 'find_candidate_devs') as m_find_devs: - with mock.patch(M_PATH + 'util.mount_cb'): - with mock.patch(M_PATH + 'on_first_boot'): - m_find_devs.return_value = ['/dev/anything'] - self.assertEqual(True, cfg_ds.get_data()) - self.assertEqual('config-disk (/dev/anything)', cfg_ds.subplatform) - - -@mock.patch( - "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) -) -class TestNetJson(CiTestCase): - def setUp(self): - super(TestNetJson, self).setUp() - self.tmp = self.tmp_dir() - self.maxDiff = None - - @mock.patch(M_PATH + 'on_first_boot') - def test_network_data_is_found(self, on_first_boot): - """Verify that network_data is present in ds in config-drive-v2.""" - myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) - self.assertIsNotNone(myds.network_json) - - @mock.patch(M_PATH + 'on_first_boot') - def test_network_config_is_converted(self, on_first_boot): - """Verify that network_data is converted and present on ds object.""" - myds = cfg_ds_from_dir(self.tmp, files=CFG_DRIVE_FILES_V2) - network_config = openstack.convert_net_json(NETWORK_DATA, - known_macs=KNOWN_MACS) - self.assertEqual(myds.network_config, network_config) - - def test_network_config_conversion_dhcp6(self): - """Test some ipv6 input network json and check the expected - conversions.""" - in_data = { - 'links': [ - {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', - 'ethernet_mac_address': 'fa:16:3e:69:b0:58', - 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, - {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', - 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', - 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, - ], - 'networks': [ - {'link': 'tap2ecc7709-b3', 'type': 'ipv6_dhcpv6-stateless', - 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', - 'id': 'network0'}, - {'link': 'tap2f88d109-5b', 'type': 'ipv6_dhcpv6-stateful', - 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', - 'id': 'network1'}, - ] - } - out_data = { - 'version': 1, - 'config': [ - {'mac_address': 'fa:16:3e:69:b0:58', - 'mtu': None, - 'name': 'enp0s1', - 'subnets': [{'type': 
'ipv6_dhcpv6-stateless'}], - 'type': 'physical'}, - {'mac_address': 'fa:16:3e:d4:57:ad', - 'mtu': None, - 'name': 'enp0s2', - 'subnets': [{'type': 'ipv6_dhcpv6-stateful'}], - 'type': 'physical', - 'accept-ra': True} - ], - } - conv_data = openstack.convert_net_json(in_data, known_macs=KNOWN_MACS) - self.assertEqual(out_data, conv_data) - - def test_network_config_conversions(self): - """Tests a bunch of input network json and checks the - expected conversions.""" - in_datas = [ - NETWORK_DATA, - { - 'services': [{'type': 'dns', 'address': '172.19.0.12'}], - 'networks': [{ - 'network_id': 'dacd568d-5be6-4786-91fe-750c374b78b4', - 'type': 'ipv4', - 'netmask': '255.255.252.0', - 'link': 'tap1a81968a-79', - 'routes': [{ - 'netmask': '0.0.0.0', - 'network': '0.0.0.0', - 'gateway': '172.19.3.254', - }], - 'ip_address': '172.19.1.34', - 'id': 'network0', - }], - 'links': [{ - 'type': 'bridge', - 'vif_id': '1a81968a-797a-400f-8a80-567f997eb93f', - 'ethernet_mac_address': 'fa:16:3e:ed:9a:59', - 'id': 'tap1a81968a-79', - 'mtu': None, - }], - }, - ] - out_datas = [ - { - 'version': 1, - 'config': [ - { - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical', - 'mac_address': 'fa:16:3e:69:b0:58', - 'name': 'enp0s1', - 'mtu': None, - }, - { - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical', - 'mac_address': 'fa:16:3e:d4:57:ad', - 'name': 'enp0s2', - 'mtu': None, - }, - { - 'subnets': [{'type': 'dhcp4'}], - 'type': 'physical', - 'mac_address': 'fa:16:3e:05:30:fe', - 'name': 'nic0', - 'mtu': None, - }, - { - 'type': 'nameserver', - 'address': '199.204.44.24', - }, - { - 'type': 'nameserver', - 'address': '199.204.47.54', - } - ], - - }, - { - 'version': 1, - 'config': [ - { - 'name': 'foo3', - 'mac_address': 'fa:16:3e:ed:9a:59', - 'mtu': None, - 'type': 'physical', - 'subnets': [ - { - 'address': '172.19.1.34', - 'netmask': '255.255.252.0', - 'type': 'static', - 'ipv4': True, - 'routes': [{ - 'gateway': '172.19.3.254', - 'netmask': '0.0.0.0', - 'network': '0.0.0.0', - }], - } - ] - }, - { - 'type': 'nameserver', - 'address': '172.19.0.12', - } - ], - }, - ] - for in_data, out_data in zip(in_datas, out_datas): - conv_data = openstack.convert_net_json(in_data, - known_macs=KNOWN_MACS) - self.assertEqual(out_data, conv_data) - - -@mock.patch( - "cloudinit.net.is_openvswitch_internal_interface", - mock.Mock(return_value=False) -) -class TestConvertNetworkData(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestConvertNetworkData, self).setUp() - self.tmp = self.tmp_dir() - - def _getnames_in_config(self, ncfg): - return set([n['name'] for n in ncfg['config'] - if n['type'] == 'physical']) - - def test_conversion_fills_names(self): - ncfg = openstack.convert_net_json(NETWORK_DATA, known_macs=KNOWN_MACS) - expected = set(['nic0', 'enp0s1', 'enp0s2']) - found = self._getnames_in_config(ncfg) - self.assertEqual(found, expected) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_convert_reads_system_prefers_name(self, get_interfaces_by_mac): - macs = KNOWN_MACS.copy() - macs.update({'fa:16:3e:05:30:fe': 'foonic1', - 'fa:16:3e:69:b0:58': 'ens1'}) - get_interfaces_by_mac.return_value = macs - - ncfg = openstack.convert_net_json(NETWORK_DATA) - expected = set(['nic0', 'ens1', 'enp0s2']) - found = self._getnames_in_config(ncfg) - self.assertEqual(found, expected) - - def test_convert_raises_value_error_on_missing_name(self): - macs = {'aa:aa:aa:aa:aa:00': 'ens1'} - self.assertRaises(ValueError, openstack.convert_net_json, - NETWORK_DATA, known_macs=macs) - - def 
test_conversion_with_route(self): - ncfg = openstack.convert_net_json(NETWORK_DATA_2, - known_macs=KNOWN_MACS) - # not the best test, but see that we get a route in the - # network config and that it gets rendered to an ENI file - routes = [] - for n in ncfg['config']: - for s in n.get('subnets', []): - routes.extend(s.get('routes', [])) - self.assertIn( - {'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '2.2.2.9'}, - routes) - eni_renderer = eni.Renderer() - eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), target=self.tmp) - with open(os.path.join(self.tmp, "etc", - "network", "interfaces"), 'r') as f: - eni_rendering = f.read() - self.assertIn("route add default gw 2.2.2.9", eni_rendering) - - def test_conversion_with_tap(self): - ncfg = openstack.convert_net_json(NETWORK_DATA_3, - known_macs=KNOWN_MACS) - physicals = set() - for i in ncfg['config']: - if i.get('type') == "physical": - physicals.add(i['name']) - self.assertEqual(physicals, set(('foo1', 'foo2'))) - - def test_bond_conversion(self): - # light testing of bond conversion and eni rendering of bond - ncfg = openstack.convert_net_json(NETWORK_DATA_BOND, - known_macs=KNOWN_MACS) - eni_renderer = eni.Renderer() - - eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), target=self.tmp) - with open(os.path.join(self.tmp, "etc", - "network", "interfaces"), 'r') as f: - eni_rendering = f.read() - - # Verify there are expected interfaces in the net config. - interfaces = sorted( - [i['name'] for i in ncfg['config'] - if i['type'] in ('vlan', 'bond', 'physical')]) - self.assertEqual( - sorted(["oeth0", "oeth1", "bond0", "bond0.602", "bond0.612"]), - interfaces) - - words = eni_rendering.split() - # 'eth0' and 'eth1' are the ids. Because their mac addresses - # map to other names, we should not see them in the ENI - self.assertNotIn('eth0', words) - self.assertNotIn('eth1', words) - - # oeth0 and oeth1 are the interface names for eni. - # bond0 will be generated for the bond. Each should be auto.
- self.assertIn("auto oeth0", eni_rendering) - self.assertIn("auto oeth1", eni_rendering) - self.assertIn("auto bond0", eni_rendering) - # The bond should have the given mac address - pos = eni_rendering.find("auto bond0") - self.assertIn(BOND_MAC, eni_rendering[pos:]) - - def test_vlan(self): - # light testing of vlan config conversion and eni rendering - ncfg = openstack.convert_net_json(NETWORK_DATA_VLAN, - known_macs=KNOWN_MACS) - eni_renderer = eni.Renderer() - eni_renderer.render_network_state( - network_state.parse_net_config_data(ncfg), target=self.tmp) - with open(os.path.join(self.tmp, "etc", - "network", "interfaces"), 'r') as f: - eni_rendering = f.read() - - self.assertIn("iface enp0s1", eni_rendering) - self.assertIn("address 10.0.1.5", eni_rendering) - self.assertIn("auto enp0s1.602", eni_rendering) - - def test_mac_addrs_can_be_upper_case(self): - # input mac addresses on rackspace may be upper case - my_netdata = deepcopy(NETWORK_DATA) - for link in my_netdata['links']: - link['ethernet_mac_address'] = link['ethernet_mac_address'].upper() - - ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) - config_name2mac = {} - for n in ncfg['config']: - if n['type'] == 'physical': - config_name2mac[n['name']] = n['mac_address'] - - expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', - 'enp0s2': 'fa:16:3e:d4:57:ad'} - self.assertEqual(expected, config_name2mac) - - def test_unknown_device_types_accepted(self): - # If we don't recognise a link, we should treat it as physical for a - # best-effort boot - my_netdata = deepcopy(NETWORK_DATA) - my_netdata['links'][0]['type'] = 'my-special-link-type' - - ncfg = openstack.convert_net_json(my_netdata, known_macs=KNOWN_MACS) - config_name2mac = {} - for n in ncfg['config']: - if n['type'] == 'physical': - config_name2mac[n['name']] = n['mac_address'] - - expected = {'nic0': 'fa:16:3e:05:30:fe', 'enp0s1': 'fa:16:3e:69:b0:58', - 'enp0s2': 'fa:16:3e:d4:57:ad'} - self.assertEqual(expected, config_name2mac) - - # We should, however, warn the user that we don't recognise the type - self.assertIn('Unknown network_data link type (my-special-link-type)', - self.logs.getvalue()) - - -def cfg_ds_from_dir(base_d, files=None): - run = os.path.join(base_d, "run") - os.mkdir(run) - cfg_ds = ds.DataSourceConfigDrive( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': run})) - cfg_ds.seed_dir = os.path.join(base_d, "seed") - if files: - populate_dir(cfg_ds.seed_dir, files) - cfg_ds.known_macs = KNOWN_MACS.copy() - if not cfg_ds.get_data(): - raise RuntimeError("Data source did not extract itself from" - " seed directory %s" % cfg_ds.seed_dir) - return cfg_ds - - -def populate_ds_from_read_config(cfg_ds, source, results): - """Patch the DataSourceConfigDrive from the results of - read_config_drive_dir hopefully in line with what it would have - if cfg_ds.get_data had been successfully called""" - cfg_ds.source = source - cfg_ds.metadata = results.get('metadata') - cfg_ds.ec2_metadata = results.get('ec2-metadata') - cfg_ds.userdata_raw = results.get('userdata') - cfg_ds.version = results.get('version') - cfg_ds.network_json = results.get('networkdata') - cfg_ds._network_config = openstack.convert_net_json( - cfg_ds.network_json, known_macs=KNOWN_MACS) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py deleted file mode 100644 index 3127014b..00000000 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ /dev/null @@ 
-1,372 +0,0 @@ -# Copyright (C) 2014 Neal Shrader -# -# Author: Neal Shrader -# Author: Ben Howard -# Author: Scott Moser -# -# This file is part of cloud-init. See LICENSE file for license information. - -import json - -from cloudinit import helpers -from cloudinit import settings -from cloudinit.sources import DataSourceDigitalOcean -from cloudinit.sources.helpers import digitalocean - -from cloudinit.tests.helpers import mock, CiTestCase - -DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", - "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"] -DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co" - -# the following JSON was taken from droplet (that's why its a string) -DO_META = json.loads(""" -{ - "droplet_id": "22532410", - "hostname": "utl-96268", - "vendor_data": "vendordata goes here", - "user_data": "userdata goes here", - "public_keys": "", - "auth_key": "authorization_key", - "region": "nyc3", - "interfaces": { - "private": [ - { - "ipv4": { - "ip_address": "10.132.6.205", - "netmask": "255.255.0.0", - "gateway": "10.132.0.1" - }, - "mac": "04:01:57:d1:9e:02", - "type": "private" - } - ], - "public": [ - { - "ipv4": { - "ip_address": "192.0.0.20", - "netmask": "255.255.255.0", - "gateway": "104.236.0.1" - }, - "ipv6": { - "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000", - "cidr": 64, - "gateway": "2604:A880:0800:0000:0000:0000:0000:0001" - }, - "anchor_ipv4": { - "ip_address": "10.0.0.5", - "netmask": "255.255.0.0", - "gateway": "10.0.0.1" - }, - "mac": "04:01:57:d1:9e:01", - "type": "public" - } - ] - }, - "floating_ip": { - "ipv4": { - "active": false - } - }, - "dns": { - "nameservers": [ - "2001:4860:4860::8844", - "2001:4860:4860::8888", - "8.8.8.8" - ] - } -} -""") - -# This has no private interface -DO_META_2 = { - "droplet_id": 27223699, - "hostname": "smtest1", - "vendor_data": "\n".join([ - ('"Content-Type: multipart/mixed; ' - 'boundary=\"===============8645434374073493512==\"'), - 'MIME-Version: 1.0', - '', - '--===============8645434374073493512==', - 'MIME-Version: 1.0' - 'Content-Type: text/cloud-config; charset="us-ascii"' - 'Content-Transfer-Encoding: 7bit' - 'Content-Disposition: attachment; filename="cloud-config"' - '', - '#cloud-config', - 'disable_root: false', - 'manage_etc_hosts: true', - '', - '', - '--===============8645434374073493512==' - ]), - "public_keys": [ - "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies" - ], - "auth_key": "88888888888888888888888888888888", - "region": "nyc3", - "interfaces": { - "public": [{ - "ipv4": { - "ip_address": "45.55.249.133", - "netmask": "255.255.192.0", - "gateway": "45.55.192.1" - }, - "anchor_ipv4": { - "ip_address": "10.17.0.5", - "netmask": "255.255.0.0", - "gateway": "10.17.0.1" - }, - "mac": "ae:cc:08:7c:88:00", - "type": "public" - }] - }, - "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}}, - "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, - "tags": None, -} - -DO_META['public_keys'] = DO_SINGLE_KEY - -MD_URL = 'http://169.254.169.254/metadata/v1.json' - - -def _mock_dmi(): - return (True, DO_META.get('id')) - - -class TestDataSourceDigitalOcean(CiTestCase): - """ - Test reading the meta-data - """ - def setUp(self): - super(TestDataSourceDigitalOcean, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self, get_sysinfo=_mock_dmi): - ds = DataSourceDigitalOcean.DataSourceDigitalOcean( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - ds.use_ip4LL = False - if get_sysinfo is not None: - ds._get_sysinfo = get_sysinfo - return ds - - 
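The get_ds() helper above swaps the datasource's _get_sysinfo attribute for a stub so platform detection never touches DMI during unit tests. A minimal sketch of that injection pattern, assuming only the (found, id) tuple shape that _mock_dmi() returns; FakeDataSource and its methods are hypothetical stand-ins, not cloud-init API, and the droplet id is the one from DO_META above:

class FakeDataSource:
    def _get_sysinfo(self):
        # A real datasource would probe DMI/SMBIOS here.
        raise RuntimeError("no hardware probe in unit tests")

    def get_data(self):
        found, droplet_id = self._get_sysinfo()
        if not found:
            return False
        self.metadata = {'droplet_id': droplet_id}
        return True


ds = FakeDataSource()
ds._get_sysinfo = lambda: (True, '22532410')  # stub replaces the probe
assert ds.get_data() is True
assert ds.metadata == {'droplet_id': '22532410'}

Because the probe is a plain attribute rather than something baked into the constructor, a test can replace it per-instance, which is exactly what get_ds(get_sysinfo=...) does.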
@mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo') - def test_returns_false_not_on_docean(self, m_read_sysinfo): - m_read_sysinfo.return_value = (False, None) - ds = self.get_ds(get_sysinfo=None) - self.assertEqual(False, ds.get_data()) - self.assertTrue(m_read_sysinfo.called) - - @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') - def test_metadata(self, mock_readmd): - mock_readmd.return_value = DO_META.copy() - - ds = self.get_ds() - ret = ds.get_data() - self.assertTrue(ret) - - self.assertTrue(mock_readmd.called) - - self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw()) - self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw()) - self.assertEqual(DO_META.get('region'), ds.availability_zone) - self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id()) - self.assertEqual(DO_META.get('hostname'), ds.get_hostname()) - - # Single key - self.assertEqual([DO_META.get('public_keys')], - ds.get_public_ssh_keys()) - - self.assertIsInstance(ds.get_public_ssh_keys(), list) - - @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') - def test_multiple_ssh_keys(self, mock_readmd): - metadata = DO_META.copy() - metadata['public_keys'] = DO_MULTIPLE_KEYS - mock_readmd.return_value = metadata.copy() - - ds = self.get_ds() - ret = ds.get_data() - self.assertTrue(ret) - - self.assertTrue(mock_readmd.called) - - # Multiple keys - self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys()) - self.assertIsInstance(ds.get_public_ssh_keys(), list) - - -class TestNetworkConvert(CiTestCase): - - def _get_networking(self): - self.m_get_by_mac.return_value = { - '04:01:57:d1:9e:01': 'ens1', - '04:01:57:d1:9e:02': 'ens2', - 'b8:ae:ed:75:5f:9a': 'enp0s25', - 'ae:cc:08:7c:88:00': 'meta2p1'} - netcfg = digitalocean.convert_network_configuration( - DO_META['interfaces'], DO_META['dns']['nameservers']) - self.assertIn('config', netcfg) - return netcfg - - def setUp(self): - super(TestNetworkConvert, self).setUp() - self.add_patch('cloudinit.net.get_interfaces_by_mac', 'm_get_by_mac') - - def test_networking_defined(self): - netcfg = self._get_networking() - self.assertIsNotNone(netcfg) - dns_defined = False - - for part in netcfg.get('config'): - n_type = part.get('type') - print("testing part ", n_type, "\n", json.dumps(part, indent=3)) - - if n_type == 'nameserver': - n_address = part.get('address') - self.assertIsNotNone(n_address) - self.assertEqual(len(n_address), 3) - - dns_resolvers = DO_META["dns"]["nameservers"] - for x in n_address: - self.assertIn(x, dns_resolvers) - dns_defined = True - - else: - n_subnets = part.get('type') - n_name = part.get('name') - n_mac = part.get('mac_address') - - self.assertIsNotNone(n_type) - self.assertIsNotNone(n_subnets) - self.assertIsNotNone(n_name) - self.assertIsNotNone(n_mac) - - self.assertTrue(dns_defined) - - def _get_nic_definition(self, int_type, expected_name): - """helper function to return if_type (i.e. 
public) and the expected - name used by cloud-init (i.e. eth0)""" - netcfg = self._get_networking() - meta_def = (DO_META.get('interfaces')).get(int_type)[0] - - self.assertEqual(int_type, meta_def.get('type')) - - for nic_def in netcfg.get('config'): - print(nic_def) - if nic_def.get('name') == expected_name: - return nic_def, meta_def - - def _get_match_subn(self, subnets, ip_addr): - """get the matching subnet definition based on ip address""" - for subn in subnets: - address = subn.get('address') - self.assertIsNotNone(address) - - # equals won't work because of ipv6 addressing being in - # cidr notation, i.e. fe00::1/64 - if ip_addr in address: - print(json.dumps(subn, indent=3)) - return subn - - def test_correct_gateways_defined(self): - """test to make sure the eth0 ipv4 and ipv6 gateways are defined""" - netcfg = self._get_networking() - gateways = [] - for nic_def in netcfg.get('config'): - if nic_def.get('type') != 'physical': - continue - for subn in nic_def.get('subnets'): - if 'gateway' in subn: - gateways.append(subn.get('gateway')) - - # we should have two gateways, one ipv4 and one ipv6 - self.assertEqual(len(gateways), 2) - - # make sure the ipv4 gateway is there - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv4_def = meta_def.get('ipv4') - self.assertIn(ipv4_def.get('gateway'), gateways) - - # make sure the ipv6 gateway is there - ipv6_def = meta_def.get('ipv6') - self.assertIn(ipv6_def.get('gateway'), gateways) - - def test_public_interface_defined(self): - """test that the public interface is defined as eth0""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - self.assertEqual('eth0', nic_def.get('name')) - self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) - self.assertEqual('physical', nic_def.get('type')) - - def test_private_interface_defined(self): - """test that the private interface is defined as eth1""" - (nic_def, meta_def) = self._get_nic_definition('private', 'eth1') - self.assertEqual('eth1', nic_def.get('name')) - self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) - self.assertEqual('physical', nic_def.get('type')) - - def test_public_interface_ipv6(self): - """test public ipv6 addressing""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv6_def = meta_def.get('ipv6') - self.assertIsNotNone(ipv6_def) - - subn_def = self._get_match_subn(nic_def.get('subnets'), - ipv6_def.get('ip_address')) - - cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'), - ipv6_def.get('cidr')) - - self.assertEqual(cidr_notated_address, subn_def.get('address')) - self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway')) - - def test_public_interface_ipv4(self): - """test public ipv4 addressing""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv4_def = meta_def.get('ipv4') - self.assertIsNotNone(ipv4_def) - - subn_def = self._get_match_subn(nic_def.get('subnets'), - ipv4_def.get('ip_address')) - - self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) - self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway')) - - def test_public_interface_anchor_ipv4(self): - """test public anchor ipv4 addressing""" - (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') - ipv4_def = meta_def.get('anchor_ipv4') - self.assertIsNotNone(ipv4_def) - - subn_def = self._get_match_subn(nic_def.get('subnets'), - ipv4_def.get('ip_address')) - - self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) - self.assertNotIn('gateway',
subn_def) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_convert_without_private(self, m_get_by_mac): - m_get_by_mac.return_value = { - 'b8:ae:ed:75:5f:9a': 'enp0s25', - 'ae:cc:08:7c:88:00': 'meta2p1'} - netcfg = digitalocean.convert_network_configuration( - DO_META_2['interfaces'], DO_META_2['dns']['nameservers']) - - # print(netcfg) - byname = {} - for i in netcfg['config']: - if 'name' in i: - if i['name'] in byname: - raise ValueError("name '%s' in config twice: %s" % - (i['name'], netcfg)) - byname[i['name']] = i - self.assertTrue('eth0' in byname) - self.assertTrue('subnets' in byname['eth0']) - eth0 = byname['eth0'] - self.assertEqual( - sorted(['45.55.249.133', '10.17.0.5']), - sorted([i['address'] for i in eth0['subnets']])) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py deleted file mode 100644 index a93f2195..00000000 --- a/tests/unittests/test_datasource/test_ec2.py +++ /dev/null @@ -1,978 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import copy -import httpretty -import json -import requests -from unittest import mock - -from cloudinit import helpers -from cloudinit.sources import DataSourceEc2 as ec2 -from cloudinit.tests import helpers as test_helpers - - -DYNAMIC_METADATA = { - "instance-identity": { - "document": json.dumps({ - "devpayProductCodes": None, - "marketplaceProductCodes": ["1abc2defghijklm3nopqrs4tu"], - "availabilityZone": "us-west-2b", - "privateIp": "10.158.112.84", - "version": "2017-09-30", - "instanceId": "my-identity-id", - "billingProducts": None, - "instanceType": "t2.micro", - "accountId": "123456789012", - "imageId": "ami-5fb8c835", - "pendingTime": "2016-11-19T16:32:11Z", - "architecture": "x86_64", - "kernelId": None, - "ramdiskId": None, - "region": "us-west-2" - }) - } -} - - -# collected from api version 2016-09-02/ with -# python3 -c 'import json -# from cloudinit.ec2_utils import get_instance_metadata as gm -# print(json.dumps(gm("2016-09-02"), indent=1, sort_keys=True))' -# Note that the MAC addresses have been modified to sort in the opposite order -# to the device-number attribute, to test LP: #1876312 -DEFAULT_METADATA = { - "ami-id": "ami-8b92b4ee", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, - "hostname": "ip-172-31-31-158.us-east-2.compute.internal", - "instance-action": "none", - "instance-id": "i-0a33f80f09c96477f", - "instance-type": "t2.small", - "local-hostname": "ip-172-3-3-15.us-east-2.compute.internal", - "local-ipv4": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "metrics": {"vhostmd": ""}, - "network": { - "interfaces": { - "macs": { - "06:17:04:d7:26:09": { - "device-number": "0", - "interface-id": "eni-e44ef49e", - "ipv4-associations": {"13.59.77.202": "172.3.3.15"}, - "ipv6s": "2600:1f16:aeb:b20b:9d87:a4af:5cc9:73dc", - "local-hostname": ("ip-172-3-3-15.us-east-2." - "compute.internal"), - "local-ipv4s": "172.3.3.15", - "mac": "06:17:04:d7:26:09", - "owner-id": "950047163771", - "public-hostname": ("ec2-13-59-77-202.us-east-2." 
- "compute.amazonaws.com"), - "public-ipv4s": "13.59.77.202", - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "2600:1f16:aeb:b20b::/64", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "2600:1f16:aeb:b200::/56" - }, - "06:17:04:d7:26:08": { - "device-number": "1", # Only IPv4 local config - "interface-id": "eni-e44ef49f", - "ipv4-associations": {"": "172.3.3.16"}, - "ipv6s": "", # No IPv6 config - "local-hostname": ("ip-172-3-3-16.us-east-2." - "compute.internal"), - "local-ipv4s": "172.3.3.16", - "mac": "06:17:04:d7:26:08", - "owner-id": "950047163771", - "public-hostname": ("ec2-172-3-3-16.us-east-2." - "compute.amazonaws.com"), - "public-ipv4s": "", # No public ipv4 config - "security-group-ids": "sg-5a61d333", - "security-groups": "wide-open", - "subnet-id": "subnet-20b8565b", - "subnet-ipv4-cidr-block": "172.31.16.0/20", - "subnet-ipv6-cidr-blocks": "", - "vpc-id": "vpc-87e72bee", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc-ipv6-cidr-blocks": "" - } - } - } - }, - "placement": {"availability-zone": "us-east-2b"}, - "profile": "default-hvm", - "public-hostname": "ec2-13-59-77-202.us-east-2.compute.amazonaws.com", - "public-ipv4": "13.59.77.202", - "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, - "reservation-id": "r-01efbc9996bac1bd6", - "security-groups": "my-wide-open", - "services": {"domain": "amazonaws.com", "partition": "aws"}, -} - -# collected from api version 2018-09-24/ with -# python3 -c 'import json -# from cloudinit.ec2_utils import get_instance_metadata as gm -# print(json.dumps(gm("2018-09-24"), indent=1, sort_keys=True))' - -NIC1_MD_IPV4_IPV6_MULTI_IP = { - "device-number": "0", - "interface-id": "eni-0d6335689899ce9cc", - "ipv4-associations": { - "18.218.219.181": "172.31.44.13" - }, - "ipv6s": [ - "2600:1f16:292:100:c187:593c:4349:136", - "2600:1f16:292:100:f153:12a3:c37c:11f9", - "2600:1f16:292:100:f152:2222:3333:4444" - ], - "local-hostname": ("ip-172-31-44-13.us-east-2." - "compute.internal"), - "local-ipv4s": [ - "172.31.44.13", - "172.31.45.70" - ], - "mac": "0a:07:84:3d:6e:38", - "owner-id": "329910648901", - "public-hostname": ("ec2-18-218-219-181.us-east-2." 
- "compute.amazonaws.com"), - "public-ipv4s": "18.218.219.181", - "security-group-ids": "sg-0c387755222ba8d2e", - "security-groups": "launch-wizard-4", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "subnet_ipv6_cidr_blocks": "2600:1f16:292:100::/64", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16", - "vpc_ipv6_cidr_blocks": "2600:1f16:292:100::/56" -} - -NIC2_MD = { - "device-number": "1", - "interface-id": "eni-043cdce36ded5e79f", - "local-hostname": "ip-172-31-47-221.us-east-2.compute.internal", - "local-ipv4s": "172.31.47.221", - "mac": "0a:75:69:92:e2:16", - "owner-id": "329910648901", - "security-group-ids": "sg-0d68fef37d8cc9b77", - "security-groups": "launch-wizard-17", - "subnet-id": "subnet-9d7ba0d1", - "subnet-ipv4-cidr-block": "172.31.32.0/20", - "vpc-id": "vpc-a07f62c8", - "vpc-ipv4-cidr-block": "172.31.0.0/16", - "vpc-ipv4-cidr-blocks": "172.31.0.0/16" -} - -SECONDARY_IP_METADATA_2018_09_24 = { - "ami-id": "ami-0986c2ac728528ac2", - "ami-launch-index": "0", - "ami-manifest-path": "(unknown)", - "block-device-mapping": { - "ami": "/dev/sda1", - "root": "/dev/sda1" - }, - "events": { - "maintenance": { - "history": "[]", - "scheduled": "[]" - } - }, - "hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "identity-credentials": { - "ec2": { - "info": { - "AccountId": "329910648901", - "Code": "Success", - "LastUpdated": "2019-07-06T14:22:56Z" - } - } - }, - "instance-action": "none", - "instance-id": "i-069e01e8cc43732f8", - "instance-type": "t2.micro", - "local-hostname": "ip-172-31-44-13.us-east-2.compute.internal", - "local-ipv4": "172.31.44.13", - "mac": "0a:07:84:3d:6e:38", - "metrics": { - "vhostmd": "" - }, - "network": { - "interfaces": { - "macs": { - "0a:07:84:3d:6e:38": NIC1_MD_IPV4_IPV6_MULTI_IP, - } - } - }, - "placement": { - "availability-zone": "us-east-2c" - }, - "profile": "default-hvm", - "public-hostname": ( - "ec2-18-218-219-181.us-east-2.compute.amazonaws.com"), - "public-ipv4": "18.218.219.181", - "public-keys": { - "yourkeyname,e": [ - "ssh-rsa AAAAW...DZ yourkeyname" - ] - }, - "reservation-id": "r-09b4917135cdd33be", - "security-groups": "launch-wizard-4", - "services": { - "domain": "amazonaws.com", - "partition": "aws" - } -} - -M_PATH_NET = 'cloudinit.sources.DataSourceEc2.net.' - - -def _register_ssh_keys(rfunc, base_url, keys_data): - """handle ssh key inconsistencies. - - public-keys in the ec2 metadata is inconsistently formated compared - to other entries. - Given keys_data of {name1: pubkey1, name2: pubkey2} - - This registers the following urls: - base_url 0={name1}\n1={name2} # (for each name) - base_url/ 0={name1}\n1={name2} # (for each name) - base_url/0 openssh-key - base_url/0/ openssh-key - base_url/0/openssh-key {pubkey1} - base_url/0/openssh-key/ {pubkey1} - ... - """ - - base_url = base_url.rstrip("/") - odd_index = '\n'.join( - ["{0}={1}".format(n, name) - for n, name in enumerate(sorted(keys_data))]) - - rfunc(base_url, odd_index) - rfunc(base_url + "/", odd_index) - - for n, name in enumerate(sorted(keys_data)): - val = keys_data[name] - if isinstance(val, list): - val = '\n'.join(val) - burl = base_url + "/%s" % n - rfunc(burl, "openssh-key") - rfunc(burl + "/", "openssh-key") - rfunc(burl + "/%s/openssh-key" % name, val) - rfunc(burl + "/%s/openssh-key/" % name, val) - - -def register_mock_metaserver(base_url, data): - """Register with httpretty a ec2 metadata like service serving 'data'. 
- - If given a dictionary, it will populate urls under base_url for - that dictionary. For example, input of - {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"} - populates - base_url with 'instance-id\nmac' - base_url/ with 'instance-id\nmac' - base_url/instance-id with i-abc - base_url/mac with 00:16:3e:00:00:00 - In the index, references to lists or dictionaries have a trailing /. - """ - def register_helper(register, base_url, body): - if not isinstance(base_url, str): - register(base_url, body) - return - base_url = base_url.rstrip("/") - if isinstance(body, str): - register(base_url, body) - elif isinstance(body, list): - register(base_url, '\n'.join(body) + '\n') - register(base_url + '/', '\n'.join(body) + '\n') - elif isinstance(body, dict): - vals = [] - for k, v in body.items(): - if k == 'public-keys': - _register_ssh_keys( - register, base_url + '/public-keys/', v) - continue - suffix = k.rstrip("/") - if not isinstance(v, (str, list)): - suffix += "/" - vals.append(suffix) - url = base_url + '/' + suffix - register_helper(register, url, v) - register(base_url, '\n'.join(vals) + '\n') - register(base_url + '/', '\n'.join(vals) + '\n') - elif body is None: - register(base_url, 'not found', status=404) - - def myreg(*argc, **kwargs): - url = argc[0] - method = httpretty.PUT if ec2.API_TOKEN_ROUTE in url else httpretty.GET - return httpretty.register_uri(method, *argc, **kwargs) - - register_helper(myreg, base_url, data) - - -class TestEc2(test_helpers.HttprettyTestCase): - with_logs = True - maxDiff = None - - valid_platform_data = { - 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', - 'uuid_source': 'dmi', - 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', - } - - def setUp(self): - super(TestEc2, self).setUp() - self.datasource = ec2.DataSourceEc2 - self.metadata_addr = self.datasource.metadata_urls[0] - self.tmp = self.tmp_dir() - - def data_url(self, version, data_item='meta-data'): - """Return a metadata url based on the version provided.""" - return '/'.join([self.metadata_addr, version, data_item]) - - def _patch_add_cleanup(self, mpath, *args, **kwargs): - p = mock.patch(mpath, *args, **kwargs) - p.start() - self.addCleanup(p.stop) - - def _setup_ds(self, sys_cfg, platform_data, md, md_version=None): - self.uris = [] - distro = {} - paths = helpers.Paths({'run_dir': self.tmp}) - if sys_cfg is None: - sys_cfg = {} - ds = self.datasource(sys_cfg=sys_cfg, distro=distro, paths=paths) - if not md_version: - md_version = ds.min_metadata_version - if platform_data is not None: - self._patch_add_cleanup( - "cloudinit.sources.DataSourceEc2._collect_platform_data", - return_value=platform_data) - - if md: - all_versions = ( - [ds.min_metadata_version] + ds.extended_metadata_versions) - token_url = self.data_url('latest', data_item='api/token') - register_mock_metaserver(token_url, 'API-TOKEN') - for version in all_versions: - metadata_url = self.data_url(version) + '/' - if version == md_version: - # Register all metadata for desired version - register_mock_metaserver( - metadata_url, md.get('md', DEFAULT_METADATA)) - userdata_url = self.data_url( - version, data_item='user-data') - register_mock_metaserver(userdata_url, md.get('ud', '')) - identity_url = self.data_url( - version, data_item='dynamic/instance-identity') - register_mock_metaserver( - identity_url, md.get('id', DYNAMIC_METADATA)) - else: - instance_id_url = metadata_url + 'instance-id' - if version == ds.min_metadata_version: - # Add min_metadata_version service availability check - register_mock_metaserver( - 
instance_id_url, DEFAULT_METADATA['instance-id']) - else: - # Register 404s for all unrequested extended versions - register_mock_metaserver(instance_id_url, None) - return ds - - def test_network_config_property_returns_version_2_network_data(self): - """network_config property returns network version 2 for metadata""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - find_fallback_path = M_PATH_NET + 'find_fallback_nic' - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = 'eth9' - ds.get_data() - - mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': '06:17:04:d7:26:09'}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - get_interface_mac_path = M_PATH_NET + 'get_interface_mac' - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - m_find_fallback.return_value = 'eth9' - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_set_dhcp4(self): - """network_config property configures dhcp4 on nics with local-ipv4s. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. - """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - find_fallback_path = M_PATH_NET + 'find_fallback_nic' - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = 'eth9' - ds.get_data() - - mac1 = '06:17:04:d7:26:08' # IPv4 only in DEFAULT_METADATA - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': mac1.lower()}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - get_interface_mac_path = M_PATH_NET + 'get_interface_mac' - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - m_find_fallback.return_value = 'eth9' - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_secondary_private_ips(self): - """network_config property configures any secondary ipv4 addresses. - - Only one device is configured based on get_interfaces_by_mac even when - multiple MACs exist in metadata. 
- """ - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': SECONDARY_IP_METADATA_2018_09_24}) - find_fallback_path = M_PATH_NET + 'find_fallback_nic' - with mock.patch(find_fallback_path) as m_find_fallback: - m_find_fallback.return_value = 'eth9' - ds.get_data() - - mac1 = '0a:07:84:3d:6e:38' # 1 secondary IPv4 and 2 secondary IPv6 - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': mac1}, 'set-name': 'eth9', - 'addresses': ['172.31.45.70/20', - '2600:1f16:292:100:f152:2222:3333:4444/128', - '2600:1f16:292:100:f153:12a3:c37c:11f9/128'], - 'dhcp4': True, 'dhcp6': True}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - get_interface_mac_path = M_PATH_NET + 'get_interface_mac' - with mock.patch(patch_path) as m_get_interfaces_by_mac: - with mock.patch(find_fallback_path) as m_find_fallback: - with mock.patch(get_interface_mac_path) as m_get_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - m_find_fallback.return_value = 'eth9' - m_get_mac.return_value = mac1 - self.assertEqual(expected, ds.network_config) - - def test_network_config_property_is_cached_in_datasource(self): - """network_config property is cached in DataSourceEc2.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - ds._network_config = {'cached': 'data'} - self.assertEqual({'cached': 'data'}, ds.network_config) - - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - def test_network_config_cached_property_refreshed_on_upgrade(self, m_dhcp): - """Refresh the network_config Ec2 cache if network key is absent. - - This catches an upgrade issue where obj.pkl contained stale metadata - which lacked newly required network key. - """ - old_metadata = copy.deepcopy(DEFAULT_METADATA) - old_metadata.pop('network') - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': old_metadata}) - self.assertTrue(ds.get_data()) - # Provide new revision of metadata that contains network data - register_mock_metaserver( - 'http://169.254.169.254/2009-04-04/meta-data/', DEFAULT_METADATA) - mac1 = '06:17:04:d7:26:09' # Defined in DEFAULT_METADATA - get_interface_mac_path = M_PATH_NET + 'get_interfaces_by_mac' - ds.fallback_nic = 'eth9' - with mock.patch(get_interface_mac_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {mac1: 'eth9'} - nc = ds.network_config # Will re-crawl network metadata - self.assertIsNotNone(nc) - self.assertIn( - 'Refreshing stale metadata from prior to upgrade', - self.logs.getvalue()) - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - self.assertEqual(expected, ds.network_config) - - def test_ec2_get_instance_id_refreshes_identity_on_upgrade(self): - """get_instance-id gets DataSourceEc2Local.identity if not present. - - This handles an upgrade case where the old pickled datasource didn't - set up self.identity, but 'systemctl cloud-init init' runs - get_instance_id which traces on missing self.identity. lp:1748354. 
- """ - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - # Mock 404s on all versions except latest - all_versions = ( - [ds.min_metadata_version] + ds.extended_metadata_versions) - for ver in all_versions[:-1]: - register_mock_metaserver( - 'http://169.254.169.254/{0}/meta-data/instance-id'.format(ver), - None) - ds.metadata_address = 'http://169.254.169.254' - register_mock_metaserver( - '{0}/{1}/meta-data/'.format(ds.metadata_address, all_versions[-1]), - DEFAULT_METADATA) - # Register dynamic/instance-identity document which we now read. - register_mock_metaserver( - '{0}/{1}/dynamic/'.format(ds.metadata_address, all_versions[-1]), - DYNAMIC_METADATA) - ds._cloud_name = ec2.CloudNames.AWS - # Setup cached metadata on the Datasource - ds.metadata = DEFAULT_METADATA - self.assertEqual('my-identity-id', ds.get_instance_id()) - - def test_classic_instance_true(self): - """If no vpc-id in metadata, is_classic_instance must return true.""" - md_copy = copy.deepcopy(DEFAULT_METADATA) - ifaces_md = md_copy.get('network', {}).get('interfaces', {}) - for _mac, mac_data in ifaces_md.get('macs', {}).items(): - if 'vpc-id' in mac_data: - del mac_data['vpc-id'] - - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': md_copy}) - self.assertTrue(ds.get_data()) - self.assertTrue(ds.is_classic_instance()) - - def test_classic_instance_false(self): - """If vpc-id in metadata, is_classic_instance must return false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - self.assertTrue(ds.get_data()) - self.assertFalse(ds.is_classic_instance()) - - def test_aws_inaccessible_imds_service_fails_with_retries(self): - """Inaccessibility of http://169.254.169.254 are retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=None) - - conn_error = requests.exceptions.ConnectionError( - '[Errno 113] no route to host' - ) - - mock_success = mock.MagicMock(contents=b'fakesuccess') - mock_success.ok.return_value = True - - with mock.patch('cloudinit.url_helper.readurl') as m_readurl: - m_readurl.side_effect = (conn_error, conn_error, mock_success) - with mock.patch('cloudinit.url_helper.time.sleep'): - self.assertTrue(ds.wait_for_metadata_service()) - - # Just one /latest/api/token request - self.assertEqual(3, len(m_readurl.call_args_list)) - for readurl_call in m_readurl.call_args_list: - self.assertIn('latest/api/token', readurl_call[0][0]) - - def test_aws_token_403_fails_without_retries(self): - """Verify that 403s fetching AWS tokens are not retried.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md=None) - token_url = self.data_url('latest', data_item='api/token') - httpretty.register_uri(httpretty.PUT, token_url, body={}, status=403) - self.assertFalse(ds.get_data()) - # Just one /latest/api/token request - logs = self.logs.getvalue() - failed_put_log = '"PUT /latest/api/token HTTP/1.1" 403 0' - expected_logs = [ - 'WARNING: Ec2 IMDS endpoint returned a 403 error. HTTP endpoint is' - ' disabled. 
Aborting.', - "WARNING: IMDS's HTTP endpoint is probably disabled", - failed_put_log - ] - for log in expected_logs: - self.assertIn(log, logs) - self.assertEqual( - 1, - len([line for line in logs.splitlines() if failed_put_log in line]) - ) - - def test_aws_token_redacted(self): - """Verify that aws tokens are redacted when logged.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - self.assertTrue(ds.get_data()) - all_logs = self.logs.getvalue().splitlines() - REDACT_TTL = "'X-aws-ec2-metadata-token-ttl-seconds': 'REDACTED'" - REDACT_TOK = "'X-aws-ec2-metadata-token': 'REDACTED'" - logs_with_redacted_ttl = [log for log in all_logs if REDACT_TTL in log] - logs_with_redacted = [log for log in all_logs if REDACT_TOK in log] - logs_with_token = [log for log in all_logs if 'API-TOKEN' in log] - self.assertEqual(1, len(logs_with_redacted_ttl)) - self.assertEqual(81, len(logs_with_redacted)) - self.assertEqual(0, len(logs_with_token)) - - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - def test_valid_platform_with_strict_true(self, m_dhcp): - """Valid platform data should return true with strict_id true.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(0, m_dhcp.call_count) - self.assertEqual('aws', ds.cloud_name) - self.assertEqual('ec2', ds.platform_type) - self.assertEqual('metadata (%s)' % ds.metadata_address, ds.subplatform) - - def test_valid_platform_with_strict_false(self): - """Valid platform data should return true with strict_id false.""" - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - ret = ds.get_data() - self.assertTrue(ret) - - def test_unknown_platform_with_strict_true(self): - """Unknown platform data with strict_id true should return False.""" - uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' - ds = self._setup_ds( - platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, - sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, - md={'md': DEFAULT_METADATA}) - ret = ds.get_data() - self.assertFalse(ret) - - def test_unknown_platform_with_strict_false(self): - """Unknown platform data with strict_id false should return True.""" - uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' - ds = self._setup_ds( - platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - ret = ds.get_data() - self.assertTrue(ret) - - def test_ec2_local_returns_false_on_non_aws(self): - """DataSourceEc2Local returns False when platform is not AWS.""" - self.datasource = ec2.DataSourceEc2Local - ds = self._setup_ds( - platform_data=self.valid_platform_data, - sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, - md={'md': DEFAULT_METADATA}) - platform_attrs = [ - attr for attr in ec2.CloudNames.__dict__.keys() - if not attr.startswith('__')] - for attr_name in platform_attrs: - platform_name = getattr(ec2.CloudNames, attr_name) - if platform_name != 'aws': - ds._cloud_name = platform_name - ret = ds.get_data() - self.assertEqual('ec2', ds.platform_type) - self.assertFalse(ret) - message = ( - "Local Ec2 mode only supported on ('aws',)," - ' not {0}'.format(platform_name)) - self.assertIn(message, 
self.logs.getvalue())
-
-    @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
-    def test_ec2_local_returns_false_on_bsd(self, m_is_freebsd):
-        """DataSourceEc2Local returns False on BSD.
-
-        FreeBSD dhclient doesn't support dhclient -sf to run in a sandbox.
-        """
-        m_is_freebsd.return_value = True
-        self.datasource = ec2.DataSourceEc2Local
-        ds = self._setup_ds(
-            platform_data=self.valid_platform_data,
-            sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
-            md={'md': DEFAULT_METADATA})
-        ret = ds.get_data()
-        self.assertFalse(ret)
-        self.assertIn(
-            "FreeBSD doesn't support running dhclient with -sf",
-            self.logs.getvalue())
-
-    @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network')
-    @mock.patch('cloudinit.net.find_fallback_nic')
-    @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery')
-    @mock.patch('cloudinit.sources.DataSourceEc2.util.is_FreeBSD')
-    def test_ec2_local_performs_dhcp_on_non_bsd(self, m_is_bsd, m_dhcp,
-                                                m_fallback_nic, m_net):
-        """Ec2Local returns True for valid platform data on non-BSD with dhcp.
-
-        DataSourceEc2Local will set up an initial IPv4 network via dhcp
-        discovery. Then the metadata service is crawled for more network
-        config info. When the platform data is valid, return True.
-        """
-
-        m_fallback_nic.return_value = 'eth9'
-        m_is_bsd.return_value = False
-        m_dhcp.return_value = [{
-            'interface': 'eth9', 'fixed-address': '192.168.2.9',
-            'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0',
-            'broadcast-address': '192.168.2.255'}]
-        self.datasource = ec2.DataSourceEc2Local
-        ds = self._setup_ds(
-            platform_data=self.valid_platform_data,
-            sys_cfg={'datasource': {'Ec2': {'strict_id': False}}},
-            md={'md': DEFAULT_METADATA})
-
-        ret = ds.get_data()
-        self.assertTrue(ret)
-        m_dhcp.assert_called_once_with('eth9', None)
-        m_net.assert_called_once_with(
-            broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9',
-            prefix_or_mask='255.255.255.0', router='192.168.2.1',
-            static_routes=None)
-        self.assertIn('Crawl of metadata service took', self.logs.getvalue())
-
-
-class TestGetSecondaryAddresses(test_helpers.CiTestCase):
-
-    mac = '06:17:04:d7:26:ff'
-    with_logs = True
-
-    def test_md_with_no_secondary_addresses(self):
-        """Empty list is returned when nic metadata contains no secondary ip"""
-        self.assertEqual([], ec2.get_secondary_addresses(NIC2_MD, self.mac))
-
-    def test_md_with_secondary_v4_and_v6_addresses(self):
-        """All secondary addresses are returned from nic metadata"""
-        self.assertEqual(
-            ['172.31.45.70/20', '2600:1f16:292:100:f152:2222:3333:4444/128',
-             '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
-            ec2.get_secondary_addresses(NIC1_MD_IPV4_IPV6_MULTI_IP, self.mac))
-
-    def test_invalid_ipv4_ipv6_cidr_metadata_logged_with_defaults(self):
-        """Any invalid subnet-ipv(4|6)-cidr-block values use defaults"""
-        invalid_cidr_md = copy.deepcopy(NIC1_MD_IPV4_IPV6_MULTI_IP)
-        invalid_cidr_md['subnet-ipv4-cidr-block'] = "something-unexpected"
-        invalid_cidr_md['subnet-ipv6-cidr-block'] = "not/sure/what/this/is"
-        self.assertEqual(
-            ['172.31.45.70/24', '2600:1f16:292:100:f152:2222:3333:4444/128',
-             '2600:1f16:292:100:f153:12a3:c37c:11f9/128'],
-            ec2.get_secondary_addresses(invalid_cidr_md, self.mac))
-        expected_logs = [
-            "WARNING: Could not parse subnet-ipv4-cidr-block"
-            " something-unexpected for mac 06:17:04:d7:26:ff."
-            " ipv4 network config prefix defaults to /24",
-            "WARNING: Could not parse subnet-ipv6-cidr-block"
-            " not/sure/what/this/is for mac 06:17:04:d7:26:ff."
- " ipv6 network config prefix defaults to /128" - ] - logs = self.logs.getvalue() - for log in expected_logs: - self.assertIn(log, logs) - - -class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): - - def setUp(self): - super(TestConvertEc2MetadataNetworkConfig, self).setUp() - self.mac1 = '06:17:04:d7:26:09' - interface_dict = copy.deepcopy( - DEFAULT_METADATA['network']['interfaces']['macs'][self.mac1]) - # These tests are written assuming the base interface doesn't have IPv6 - interface_dict.pop('ipv6s') - self.network_metadata = { - 'interfaces': {'macs': {self.mac1: interface_dict}}} - - def test_convert_ec2_metadata_network_config_skips_absent_macs(self): - """Any mac absent from metadata is skipped by network config.""" - macs_to_nics = {self.mac1: 'eth9', 'DE:AD:BE:EF:FF:FF': 'vitualnic2'} - - # DE:AD:BE:EF:FF:FF represented by OS but not in metadata - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - self.network_metadata, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_handles_only_dhcp6(self): - """Config dhcp6 when ipv6s is in metadata for a mac.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_ipv6['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - nic1_metadata.pop('public-ipv4s') - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_local_only_dhcp4(self): - """Config dhcp4 when there are no public addresses in public-ipv4s.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_ipv6['interfaces']['macs'][self.mac1]) - nic1_metadata['local-ipv4s'] = '172.3.3.15' - nic1_metadata.pop('public-ipv4s') - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_handles_absent_dhcp4(self): - """Config dhcp4 on fallback_nic when there are no ipv4 addresses.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_ipv6 = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_ipv6['interfaces']['macs'][self.mac1]) - nic1_metadata['public-ipv4s'] = '' - - # When no ipv4 or ipv6 content but fallback_nic set, set dhcp4 config. 
- expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': False}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_ipv6, macs_to_nics, fallback_nic='eth9')) - - def test_convert_ec2_metadata_network_config_handles_local_v4_and_v6(self): - """When ipv6s and local-ipv4s are non-empty, enable dhcp6 and dhcp4.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_both['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - nic1_metadata.pop('public-ipv4s') - nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_handles_multiple_nics(self): - """DHCP route-metric increases on secondary NICs for IPv4 and IPv6.""" - mac2 = '06:17:04:d7:26:08' - macs_to_nics = {self.mac1: 'eth9', mac2: 'eth10'} - network_metadata_both = copy.deepcopy(self.network_metadata) - # Add 2nd nic info - network_metadata_both['interfaces']['macs'][mac2] = NIC2_MD - nic1_metadata = ( - network_metadata_both['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - nic1_metadata.pop('public-ipv4s') # No public-ipv4 IPs in cfg - nic1_metadata['local-ipv4s'] = '10.0.0.42' # Local ipv4 only on vpc - expected = {'version': 2, 'ethernets': { - 'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 100}, - 'dhcp6': True, 'dhcp6-overrides': {'route-metric': 100}}, - 'eth10': { - 'match': {'macaddress': mac2}, 'set-name': 'eth10', - 'dhcp4': True, 'dhcp4-overrides': {'route-metric': 200}, - 'dhcp6': False}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics)) - - def test_convert_ec2_metadata_network_config_handles_dhcp4_and_dhcp6(self): - """Config both dhcp4 and dhcp6 when both vpc-ipv6 and ipv4 exists.""" - macs_to_nics = {self.mac1: 'eth9'} - network_metadata_both = copy.deepcopy(self.network_metadata) - nic1_metadata = ( - network_metadata_both['interfaces']['macs'][self.mac1]) - nic1_metadata['ipv6s'] = '2620:0:1009:fd00:e442:c88d:c04d:dc85/64' - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, 'set-name': 'eth9', - 'dhcp4': True, 'dhcp6': True}}} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config( - network_metadata_both, macs_to_nics)) - - def test_convert_ec2_metadata_gets_macs_from_get_interfaces_by_mac(self): - """Convert Ec2 Metadata calls get_interfaces_by_mac by default.""" - expected = {'version': 2, 'ethernets': {'eth9': { - 'match': {'macaddress': self.mac1}, - 'set-name': 'eth9', 'dhcp4': True, 'dhcp6': False}}} - patch_path = M_PATH_NET + 'get_interfaces_by_mac' - with mock.patch(patch_path) as m_get_interfaces_by_mac: - m_get_interfaces_by_mac.return_value = {self.mac1: 'eth9'} - self.assertEqual( - expected, - ec2.convert_ec2_metadata_network_config(self.network_metadata)) - - -class TesIdentifyPlatform(test_helpers.CiTestCase): - - def collmock(self, **kwargs): - """return non-special 
_collect_platform_data updated with changes."""
-        unspecial = {
-            'asset_tag': '3857-0037-2746-7462-1818-3997-77',
-            'serial': 'H23-C4J3JV-R6',
-            'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2',
-            'uuid_source': 'dmi',
-            'vendor': 'tothecloud',
-        }
-        unspecial.update(**kwargs)
-        return unspecial
-
-    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
-    def test_identify_zstack(self, m_collect):
-        """zstack should be identified if chassis-asset-tag ends in .zstack.io
-        """
-        m_collect.return_value = self.collmock(asset_tag='123456.zstack.io')
-        self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform())
-
-    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
-    def test_identify_zstack_full_domain_only(self, m_collect):
-        """zstack asset-tag matching should match only on full domain boundary.
-        """
-        m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io')
-        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
-
-    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
-    def test_identify_e24cloud(self, m_collect):
-        """e24cloud identified if vendor is e24cloud"""
-        m_collect.return_value = self.collmock(vendor='e24cloud')
-        self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform())
-
-    @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data')
-    def test_identify_e24cloud_negative(self, m_collect):
-        """e24cloud not identified if vendor is not exactly e24cloud"""
-        m_collect.return_value = self.collmock(vendor='e24cloudyday')
-        self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform())
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py
deleted file mode 100644
index f0061199..00000000
--- a/tests/unittests/test_datasource/test_exoscale.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Author: Mathieu Corbin
-# Author: Christopher Glass
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import helpers -from cloudinit.sources.DataSourceExoscale import ( - API_VERSION, - DataSourceExoscale, - METADATA_URL, - get_password, - PASSWORD_SERVER_PORT, - read_metadata) -from cloudinit.tests.helpers import HttprettyTestCase, mock -from cloudinit import util - -import httpretty -import os -import requests - - -TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, - PASSWORD_SERVER_PORT, - API_VERSION) - -TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, - API_VERSION) - -TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, - API_VERSION) - - -@httpretty.activate -class TestDatasourceExoscale(HttprettyTestCase): - - def setUp(self): - super(TestDatasourceExoscale, self).setUp() - self.tmp = self.tmp_dir() - self.password_url = TEST_PASSWORD_URL - self.metadata_url = TEST_METADATA_URL - self.userdata_url = TEST_USERDATA_URL - - def test_password_saved(self): - """The password is not set when it is not found - in the metadata service.""" - httpretty.register_uri(httpretty.GET, - self.password_url, - body="saved_password") - self.assertFalse(get_password()) - - def test_password_empty(self): - """No password is set if the metadata service returns - an empty string.""" - httpretty.register_uri(httpretty.GET, - self.password_url, - body="") - self.assertFalse(get_password()) - - def test_password(self): - """The password is set to what is found in the metadata - service.""" - expected_password = "p@ssw0rd" - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_password) - password = get_password() - self.assertEqual(expected_password, password) - - def test_activate_removes_set_passwords_semaphore(self): - """Allow set_passwords to run every boot by removing the semaphore.""" - path = helpers.Paths({'cloud_dir': self.tmp}) - sem_dir = self.tmp_path('instance/sem', dir=self.tmp) - util.ensure_dir(sem_dir) - sem_file = os.path.join(sem_dir, 'config_set_passwords') - with open(sem_file, 'w') as stream: - stream.write('') - ds = DataSourceExoscale({}, None, path) - ds.activate(None, None) - self.assertFalse(os.path.exists(sem_file)) - - def test_get_data(self): - """The datasource conforms to expected behavior when supplied - full test data.""" - path = helpers.Paths({'run_dir': self.tmp}) - ds = DataSourceExoscale({}, None, path) - ds._is_platform_viable = lambda: True - expected_password = "p@ssw0rd" - expected_id = "12345" - expected_hostname = "myname" - expected_userdata = "#cloud-config" - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_password) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) - self.assertTrue(ds._get_data()) - self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), - {'ssh_pwauth': True, - 'password': expected_password, - 'chpasswd': { - 'expire': False, - }}) - - def test_get_data_saved_password(self): - """The datasource conforms to expected behavior when saved_password is - returned by the password server.""" - path = helpers.Paths({'run_dir': self.tmp}) - ds = DataSourceExoscale({}, 
None, path) - ds._is_platform_viable = lambda: True - expected_answer = "saved_password" - expected_id = "12345" - expected_hostname = "myname" - expected_userdata = "#cloud-config" - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_answer) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) - self.assertTrue(ds._get_data()) - self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), {}) - - def test_get_data_no_password(self): - """The datasource conforms to expected behavior when no password is - returned by the password server.""" - path = helpers.Paths({'run_dir': self.tmp}) - ds = DataSourceExoscale({}, None, path) - ds._is_platform_viable = lambda: True - expected_answer = "" - expected_id = "12345" - expected_hostname = "myname" - expected_userdata = "#cloud-config" - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.password_url, - body=expected_answer) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) - self.assertTrue(ds._get_data()) - self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") - self.assertEqual(ds.metadata, {"instance-id": expected_id, - "local-hostname": expected_hostname}) - self.assertEqual(ds.get_config_obj(), {}) - - @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') - def test_read_metadata_when_password_server_unreachable(self, m_password): - """The read_metadata function returns partial results in case the - password server (only) is unreachable.""" - expected_id = "12345" - expected_hostname = "myname" - expected_userdata = "#cloud-config" - - m_password.side_effect = requests.Timeout('Fake Connection Timeout') - httpretty.register_uri(httpretty.GET, - self.userdata_url, - body=expected_userdata) - httpretty.register_uri(httpretty.GET, - self.metadata_url, - body="instance-id\nlocal-hostname") - httpretty.register_uri(httpretty.GET, - "{}local-hostname".format(self.metadata_url), - body=expected_hostname) - httpretty.register_uri(httpretty.GET, - "{}instance-id".format(self.metadata_url), - body=expected_id) - - result = read_metadata() - - self.assertIsNone(result.get("password")) - self.assertEqual(result.get("user-data").decode("utf-8"), - expected_userdata) - - def test_non_viable_platform(self): - """The datasource fails fast when the platform is not viable.""" - path = helpers.Paths({'run_dir': self.tmp}) - ds = DataSourceExoscale({}, None, path) - ds._is_platform_viable = lambda: False - self.assertFalse(ds._get_data()) diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py deleted file mode 100644 index 1d91b301..00000000 --- a/tests/unittests/test_datasource/test_gce.py +++ /dev/null @@ -1,388 +0,0 @@ -# Copyright 
(C) 2014 Vaidas Jablonskis -# -# Author: Vaidas Jablonskis -# -# This file is part of cloud-init. See LICENSE file for license information. - -import datetime -import httpretty -import json -import re -from unittest import mock -from urllib.parse import urlparse - -from base64 import b64encode, b64decode - -from cloudinit import distros -from cloudinit import helpers -from cloudinit import settings -from cloudinit.sources import DataSourceGCE - -from cloudinit.tests import helpers as test_helpers - - -GCE_META = { - 'instance/id': '123', - 'instance/zone': 'foo/bar', - 'instance/hostname': 'server.project-foo.local', -} - -GCE_META_PARTIAL = { - 'instance/id': '1234', - 'instance/hostname': 'server.project-bar.local', - 'instance/zone': 'bar/baz', -} - -GCE_META_ENCODING = { - 'instance/id': '12345', - 'instance/hostname': 'server.project-baz.local', - 'instance/zone': 'baz/bang', - 'instance/attributes': { - 'user-data': b64encode(b'#!/bin/echo baz\n').decode('utf-8'), - 'user-data-encoding': 'base64', - } -} - -GCE_USER_DATA_TEXT = { - 'instance/id': '12345', - 'instance/hostname': 'server.project-baz.local', - 'instance/zone': 'baz/bang', - 'instance/attributes': { - 'user-data': '#!/bin/sh\necho hi mom\ntouch /run/up-now\n', - } -} - -HEADERS = {'Metadata-Flavor': 'Google'} -MD_URL_RE = re.compile( - r'http://metadata.google.internal/computeMetadata/v1/.*') -GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/' - 'v1/instance/guest-attributes/hostkeys/') - - -def _set_mock_metadata(gce_meta=None): - if gce_meta is None: - gce_meta = GCE_META - - def _request_callback(method, uri, headers): - url_path = urlparse(uri).path - if url_path.startswith('/computeMetadata/v1/'): - path = url_path.split('/computeMetadata/v1/')[1:][0] - recursive = path.endswith('/') - path = path.rstrip('/') - else: - path = None - if path in gce_meta: - response = gce_meta.get(path) - if recursive: - response = json.dumps(response) - return (200, headers, response) - else: - return (404, headers, '') - - # reset is needed. 
https://github.com/gabrielfalcao/HTTPretty/issues/316 - httpretty.register_uri(httpretty.GET, MD_URL_RE, body=_request_callback) - - -@httpretty.activate -class TestDataSourceGCE(test_helpers.HttprettyTestCase): - - def _make_distro(self, dtype, def_user=None): - cfg = dict(settings.CFG_BUILTIN) - cfg['system_info']['distro'] = dtype - paths = helpers.Paths(cfg['system_info']['paths']) - distro_cls = distros.fetch(dtype) - if def_user: - cfg['system_info']['default_user'] = def_user.copy() - distro = distro_cls(dtype, cfg['system_info'], paths) - return distro - - def setUp(self): - tmp = self.tmp_dir() - self.ds = DataSourceGCE.DataSourceGCE( - settings.CFG_BUILTIN, None, - helpers.Paths({'run_dir': tmp})) - ppatch = self.m_platform_reports_gce = mock.patch( - 'cloudinit.sources.DataSourceGCE.platform_reports_gce') - self.m_platform_reports_gce = ppatch.start() - self.m_platform_reports_gce.return_value = True - self.addCleanup(ppatch.stop) - self.add_patch('time.sleep', 'm_sleep') # just to speed up tests - super(TestDataSourceGCE, self).setUp() - - def test_connection(self): - _set_mock_metadata() - success = self.ds.get_data() - self.assertTrue(success) - - req_header = httpretty.last_request().headers - for header_name, expected_value in HEADERS.items(): - self.assertEqual(expected_value, req_header.get(header_name)) - - def test_metadata(self): - # UnicodeDecodeError if set to ds.userdata instead of userdata_raw - meta = GCE_META.copy() - meta['instance/attributes/user-data'] = b'/bin/echo \xff\n' - - _set_mock_metadata() - self.ds.get_data() - - shostname = GCE_META.get('instance/hostname').split('.')[0] - self.assertEqual(shostname, - self.ds.get_hostname()) - - self.assertEqual(GCE_META.get('instance/id'), - self.ds.get_instance_id()) - - self.assertEqual(GCE_META.get('instance/attributes/user-data'), - self.ds.get_userdata_raw()) - - # test partial metadata (missing user-data in particular) - def test_metadata_partial(self): - _set_mock_metadata(GCE_META_PARTIAL) - self.ds.get_data() - - self.assertEqual(GCE_META_PARTIAL.get('instance/id'), - self.ds.get_instance_id()) - - shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0] - self.assertEqual(shostname, self.ds.get_hostname()) - - def test_userdata_no_encoding(self): - """check that user-data is read.""" - _set_mock_metadata(GCE_USER_DATA_TEXT) - self.ds.get_data() - self.assertEqual( - GCE_USER_DATA_TEXT['instance/attributes']['user-data'].encode(), - self.ds.get_userdata_raw()) - - def test_metadata_encoding(self): - """user-data is base64 encoded if user-data-encoding is 'base64'.""" - _set_mock_metadata(GCE_META_ENCODING) - self.ds.get_data() - - instance_data = GCE_META_ENCODING.get('instance/attributes') - decoded = b64decode(instance_data.get('user-data')) - self.assertEqual(decoded, self.ds.get_userdata_raw()) - - def test_missing_required_keys_return_false(self): - for required_key in ['instance/id', 'instance/zone', - 'instance/hostname']: - meta = GCE_META_PARTIAL.copy() - del meta[required_key] - _set_mock_metadata(meta) - self.assertEqual(False, self.ds.get_data()) - httpretty.reset() - - def test_no_ssh_keys_metadata(self): - _set_mock_metadata() - self.ds.get_data() - self.assertEqual([], self.ds.get_public_ssh_keys()) - - def test_cloudinit_ssh_keys(self): - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' - project_attributes = { - 'sshKeys': '\n'.join([ - 'cloudinit:{0}'.format(valid_key.format(0)), - 'user:{0}'.format(invalid_key.format(0)), - ]), - 'ssh-keys': '\n'.join([ - 
'cloudinit:{0}'.format(valid_key.format(1)), - 'user:{0}'.format(invalid_key.format(1)), - ]), - } - instance_attributes = { - 'ssh-keys': '\n'.join([ - 'cloudinit:{0}'.format(valid_key.format(2)), - 'user:{0}'.format(invalid_key.format(2)), - ]), - 'block-project-ssh-keys': 'False', - } - - meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes - - _set_mock_metadata(meta) - self.ds.get_data() - - expected = [valid_key.format(key) for key in range(3)] - self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) - - @mock.patch("cloudinit.sources.DataSourceGCE.ug_util") - def test_default_user_ssh_keys(self, mock_ug_util): - mock_ug_util.normalize_users_groups.return_value = None, None - mock_ug_util.extract_default.return_value = 'ubuntu', None - ubuntu_ds = DataSourceGCE.DataSourceGCE( - settings.CFG_BUILTIN, self._make_distro('ubuntu'), - helpers.Paths({'run_dir': self.tmp_dir()})) - - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' - project_attributes = { - 'sshKeys': '\n'.join([ - 'ubuntu:{0}'.format(valid_key.format(0)), - 'user:{0}'.format(invalid_key.format(0)), - ]), - 'ssh-keys': '\n'.join([ - 'ubuntu:{0}'.format(valid_key.format(1)), - 'user:{0}'.format(invalid_key.format(1)), - ]), - } - instance_attributes = { - 'ssh-keys': '\n'.join([ - 'ubuntu:{0}'.format(valid_key.format(2)), - 'user:{0}'.format(invalid_key.format(2)), - ]), - 'block-project-ssh-keys': 'False', - } - - meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes - - _set_mock_metadata(meta) - ubuntu_ds.get_data() - - expected = [valid_key.format(key) for key in range(3)] - self.assertEqual(set(expected), set(ubuntu_ds.get_public_ssh_keys())) - - def test_instance_ssh_keys_override(self): - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' - project_attributes = { - 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), - 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), - } - instance_attributes = { - 'sshKeys': 'cloudinit:{0}'.format(valid_key.format(0)), - 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(1)), - 'block-project-ssh-keys': 'False', - } - - meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes - - _set_mock_metadata(meta) - self.ds.get_data() - - expected = [valid_key.format(key) for key in range(2)] - self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) - - def test_block_project_ssh_keys_override(self): - valid_key = 'ssh-rsa VALID {0}' - invalid_key = 'ssh-rsa INVALID {0}' - project_attributes = { - 'sshKeys': 'cloudinit:{0}'.format(invalid_key.format(0)), - 'ssh-keys': 'cloudinit:{0}'.format(invalid_key.format(1)), - } - instance_attributes = { - 'ssh-keys': 'cloudinit:{0}'.format(valid_key.format(0)), - 'block-project-ssh-keys': 'True', - } - - meta = GCE_META.copy() - meta['project/attributes'] = project_attributes - meta['instance/attributes'] = instance_attributes - - _set_mock_metadata(meta) - self.ds.get_data() - - expected = [valid_key.format(0)] - self.assertEqual(set(expected), set(self.ds.get_public_ssh_keys())) - - def test_only_last_part_of_zone_used_for_availability_zone(self): - _set_mock_metadata() - r = self.ds.get_data() - self.assertEqual(True, r) - self.assertEqual('bar', self.ds.availability_zone) - - @mock.patch("cloudinit.sources.DataSourceGCE.GoogleMetadataFetcher") - def 
test_get_data_returns_false_if_not_on_gce(self, m_fetcher): - self.m_platform_reports_gce.return_value = False - ret = self.ds.get_data() - self.assertEqual(False, ret) - m_fetcher.assert_not_called() - - def test_has_expired(self): - - def _get_timestamp(days): - format_str = '%Y-%m-%dT%H:%M:%S+0000' - today = datetime.datetime.now() - timestamp = today + datetime.timedelta(days=days) - return timestamp.strftime(format_str) - - past = _get_timestamp(-1) - future = _get_timestamp(1) - ssh_keys = { - None: False, - '': False, - 'Invalid': False, - 'user:ssh-rsa key user@domain.com': False, - 'user:ssh-rsa key google {"expireOn":"%s"}' % past: False, - 'user:ssh-rsa key google-ssh': False, - 'user:ssh-rsa key google-ssh {invalid:json}': False, - 'user:ssh-rsa key google-ssh {"userName":"user"}': False, - 'user:ssh-rsa key google-ssh {"expireOn":"invalid"}': False, - 'user:xyz key google-ssh {"expireOn":"%s"}' % future: False, - 'user:xyz key google-ssh {"expireOn":"%s"}' % past: True, - } - - for key, expired in ssh_keys.items(): - self.assertEqual(DataSourceGCE._has_expired(key), expired) - - def test_parse_public_keys_non_ascii(self): - public_key_data = [ - 'cloudinit:rsa ssh-ke%s invalid' % chr(165), - 'use%sname:rsa ssh-key' % chr(174), - 'cloudinit:test 1', - 'default:test 2', - 'user:test 3', - ] - expected = ['test 1', 'test 2'] - found = DataSourceGCE._parse_public_keys( - public_key_data, default_user='default') - self.assertEqual(sorted(found), sorted(expected)) - - @mock.patch("cloudinit.url_helper.readurl") - def test_publish_host_keys(self, m_readurl): - hostkeys = [('ssh-rsa', 'asdfasdf'), - ('ssh-ed25519', 'qwerqwer')] - readurl_expected_calls = [ - mock.call(check_status=False, data=b'asdfasdf', headers=HEADERS, - request_method='PUT', - url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-rsa')), - mock.call(check_status=False, data=b'qwerqwer', headers=HEADERS, - request_method='PUT', - url='%s%s' % (GUEST_ATTRIBUTES_URL, 'ssh-ed25519')), - ] - self.ds.publish_host_keys(hostkeys) - m_readurl.assert_has_calls(readurl_expected_calls, any_order=True) - - @mock.patch( - "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", - autospec=True, - ) - @mock.patch( - "cloudinit.sources.DataSourceGCE.DataSourceGCELocal.fallback_interface" - ) - def test_local_datasource_uses_ephemeral_dhcp(self, _m_fallback, m_dhcp): - _set_mock_metadata() - ds = DataSourceGCE.DataSourceGCELocal( - sys_cfg={}, distro=None, paths=None - ) - ds._get_data() - assert m_dhcp.call_count == 1 - - @mock.patch( - "cloudinit.sources.DataSourceGCE.EphemeralDHCPv4", - autospec=True, - ) - def test_datasource_doesnt_use_ephemeral_dhcp(self, m_dhcp): - _set_mock_metadata() - ds = DataSourceGCE.DataSourceGCE(sys_cfg={}, distro=None, paths=None) - ds._get_data() - assert m_dhcp.call_count == 0 - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/test_datasource/test_hetzner.py deleted file mode 100644 index eadb92f1..00000000 --- a/tests/unittests/test_datasource/test_hetzner.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (C) 2018 Jonas Keidel -# -# Author: Jonas Keidel -# -# This file is part of cloud-init. See LICENSE file for license information. 
- -from cloudinit.sources import DataSourceHetzner -import cloudinit.sources.helpers.hetzner as hc_helper -from cloudinit import util, settings, helpers - -from cloudinit.tests.helpers import mock, CiTestCase - -import base64 -import pytest - -METADATA = util.load_yaml(""" -hostname: cloudinit-test -instance-id: 123456 -local-ipv4: '' -network-config: - config: - - mac_address: 96:00:00:08:19:da - name: eth0 - subnets: - - dns_nameservers: - - 213.133.99.99 - - 213.133.100.100 - - 213.133.98.98 - ipv4: true - type: dhcp - type: physical - - name: eth0:0 - subnets: - - address: 2a01:4f8:beef:beef::1/64 - gateway: fe80::1 - ipv6: true - routes: - - gateway: fe80::1%eth0 - netmask: 0 - network: '::' - type: static - type: physical - version: 1 -network-sysconfig: "DEVICE='eth0'\nTYPE=Ethernet\nBOOTPROTO=dhcp\n\ - ONBOOT='yes'\nHWADDR=96:00:00:08:19:da\n\ - IPV6INIT=yes\nIPV6ADDR=2a01:4f8:beef:beef::1/64\n\ - IPV6_DEFAULTGW=fe80::1%eth0\nIPV6_AUTOCONF=no\n\ - DNS1=213.133.99.99\nDNS2=213.133.100.100\n" -public-ipv4: 192.168.0.1 -public-keys: -- ssh-ed25519 \ - AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \ - test-key@workstation -vendor_data: "test" -""") - -USERDATA = b"""#cloud-config -runcmd: -- [touch, /root/cloud-init-worked ] -""" - - -class TestDataSourceHetzner(CiTestCase): - """ - Test reading the meta-data - """ - def setUp(self): - super(TestDataSourceHetzner, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self): - ds = DataSourceHetzner.DataSourceHetzner( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - return ds - - @mock.patch('cloudinit.net.EphemeralIPv4Network') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') - @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata') - @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') - def test_read_data(self, m_get_hcloud_data, m_usermd, m_readmd, - m_fallback_nic, m_net): - m_get_hcloud_data.return_value = (True, - str(METADATA.get('instance-id'))) - m_readmd.return_value = METADATA.copy() - m_usermd.return_value = USERDATA - m_fallback_nic.return_value = 'eth0' - - ds = self.get_ds() - ret = ds.get_data() - self.assertTrue(ret) - - m_net.assert_called_once_with( - 'eth0', '169.254.0.1', - 16, '169.254.255.255' - ) - - self.assertTrue(m_readmd.called) - - self.assertEqual(METADATA.get('hostname'), ds.get_hostname()) - - self.assertEqual(METADATA.get('public-keys'), - ds.get_public_ssh_keys()) - - self.assertIsInstance(ds.get_public_ssh_keys(), list) - self.assertEqual(ds.get_userdata_raw(), USERDATA) - self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data')) - - @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.sources.DataSourceHetzner.get_hcloud_data') - def test_not_on_hetzner_returns_false(self, m_get_hcloud_data, - m_find_fallback, m_read_md): - """If helper 'get_hcloud_data' returns False, - return False from get_data.""" - m_get_hcloud_data.return_value = (False, None) - ds = self.get_ds() - ret = ds.get_data() - - self.assertFalse(ret) - # These are a white box attempt to ensure it did not search. 
- m_find_fallback.assert_not_called() - m_read_md.assert_not_called() - - -class TestMaybeB64Decode: - """Test the maybe_b64decode helper function.""" - - @pytest.mark.parametrize("invalid_input", (str("not bytes"), int(4))) - def test_raises_error_on_non_bytes(self, invalid_input): - """maybe_b64decode should raise error if data is not bytes.""" - with pytest.raises(TypeError): - hc_helper.maybe_b64decode(invalid_input) - - @pytest.mark.parametrize("in_data,expected", [ - # If data is not b64 encoded, then return value should be the same. - (b"this is my data", b"this is my data"), - # If data is b64 encoded, then return value should be decoded. - (base64.b64encode(b"data"), b"data"), - ]) - def test_happy_path(self, in_data, expected): - assert expected == hc_helper.maybe_b64decode(in_data) diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/test_datasource/test_ibmcloud.py deleted file mode 100644 index 9013ae9f..00000000 --- a/tests/unittests/test_datasource/test_ibmcloud.py +++ /dev/null @@ -1,343 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.helpers import Paths -from cloudinit.sources import DataSourceIBMCloud as ibm -from cloudinit.tests import helpers as test_helpers -from cloudinit import util - -import base64 -import copy -import json -from textwrap import dedent - -mock = test_helpers.mock - -D_PATH = "cloudinit.sources.DataSourceIBMCloud." - - -@mock.patch(D_PATH + "_is_xen", return_value=True) -@mock.patch(D_PATH + "_is_ibm_provisioning") -@mock.patch(D_PATH + "util.blkid") -class TestGetIBMPlatform(test_helpers.CiTestCase): - """Test the get_ibm_platform helper.""" - - blkid_base = { - "/dev/xvda1": { - "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs", - "TYPE": "ext3"}, - "/dev/xvda2": { - "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs", - "TYPE": "ext4"}, - } - - blkid_metadata_disk = { - "/dev/xvdh1": { - "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat", - "SEC_TYPE": "msdos", "UUID": "681B-8C5D", - "PARTUUID": "3d631e09-01"}, - } - - blkid_oscode_disk = { - "/dev/xvdh": { - "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat", - "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID} - } - - def setUp(self): - self.blkid_metadata = copy.deepcopy(self.blkid_base) - self.blkid_metadata.update(copy.deepcopy(self.blkid_metadata_disk)) - - self.blkid_oscode = copy.deepcopy(self.blkid_base) - self.blkid_oscode.update(copy.deepcopy(self.blkid_oscode_disk)) - - def test_id_template_live_metadata(self, m_blkid, m_is_prov, _m_xen): - """identify TEMPLATE_LIVE_METADATA.""" - m_blkid.return_value = self.blkid_metadata - m_is_prov.return_value = False - self.assertEqual( - (ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"), - ibm.get_ibm_platform()) - - def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen): - """identify TEMPLATE_PROVISIONING_METADATA.""" - m_blkid.return_value = self.blkid_metadata - m_is_prov.return_value = True - self.assertEqual( - (ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"), - ibm.get_ibm_platform()) - - def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen): - """identify TEMPLATE_PROVISIONING_NODATA.""" - m_blkid.return_value = self.blkid_base - m_is_prov.return_value = True - self.assertEqual( - (ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None), - ibm.get_ibm_platform()) - - def test_id_os_code(self, m_blkid, m_is_prov, _m_xen): - """Identify OS_CODE.""" - m_blkid.return_value = self.blkid_oscode - 
m_is_prov.return_value = False - self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"), - ibm.get_ibm_platform()) - - def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen): - """Test against false positive on openstack with non-ibm UUID.""" - blkid = self.blkid_oscode - blkid["/dev/xvdh"]["UUID"] = "9999-9999" - m_blkid.return_value = blkid - m_is_prov.return_value = False - self.assertEqual((None, None), ibm.get_ibm_platform()) - - -@mock.patch(D_PATH + "_read_system_uuid", return_value=None) -@mock.patch(D_PATH + "get_ibm_platform") -class TestReadMD(test_helpers.CiTestCase): - """Test the read_datasource helper.""" - - template_md = { - "files": [], - "network_config": {"content_path": "/content/interfaces"}, - "hostname": "ci-fond-ram", - "name": "ci-fond-ram", - "domain": "testing.ci.cloud-init.org", - "meta": {"dsmode": "net"}, - "uuid": "8e636730-9f5d-c4a5-327c-d7123c46e82f", - "public_keys": {"1091307": "ssh-rsa AAAAB3NzaC1...Hw== ci-pubkey"}, - } - - oscode_md = { - "hostname": "ci-grand-gannet.testing.ci.cloud-init.org", - "name": "ci-grand-gannet", - "uuid": "2f266908-8e6c-4818-9b5c-42e9cc66a785", - "random_seed": "bm90LXJhbmRvbQo=", - "crypt_key": "ssh-rsa AAAAB3NzaC1yc2..n6z/", - "configuration_token": "eyJhbGciOi..M3ZA", - "public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"}, - } - - content_interfaces = dedent("""\ - auto lo - iface lo inet loopback - - auto eth0 - allow-hotplug eth0 - iface eth0 inet static - address 10.82.43.5 - netmask 255.255.255.192 - """) - - userdata = b"#!/bin/sh\necho hi mom\n" - # meta.js file gets json encoded userdata as a list. - meta_js = '["#!/bin/sh\necho hi mom\n"]' - vendor_data = { - "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"} - - network_data = { - "links": [ - {"id": "interface_29402281", "name": "eth0", "mtu": None, - "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"}, - {"id": "interface_29402279", "name": "eth1", "mtu": None, - "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"} - ], - "networks": [ - {"id": "network_109887563", "link": "interface_29402281", - "type": "ipv4", "ip_address": "10.82.43.2", - "netmask": "255.255.255.192", - "routes": [ - {"network": "10.0.0.0", "netmask": "255.0.0.0", - "gateway": "10.82.43.1"}, - {"network": "161.26.0.0", "netmask": "255.255.0.0", - "gateway": "10.82.43.1"}]}, - {"id": "network_109887551", "link": "interface_29402279", - "type": "ipv4", "ip_address": "108.168.194.252", - "netmask": "255.255.255.248", - "routes": [ - {"network": "0.0.0.0", "netmask": "0.0.0.0", - "gateway": "108.168.194.249"}]} - ], - "services": [ - {"type": "dns", "address": "10.0.80.11"}, - {"type": "dns", "address": "10.0.80.12"} - ], - } - - sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f' - - def _get_expected_metadata(self, os_md): - """return expected 'metadata' for data loaded from meta_data.json.""" - os_md = copy.deepcopy(os_md) - renames = ( - ('hostname', 'local-hostname'), - ('uuid', 'instance-id'), - ('public_keys', 'public-keys')) - ret = {} - for osname, mdname in renames: - if osname in os_md: - ret[mdname] = os_md[osname] - if 'random_seed' in os_md: - ret['random_seed'] = base64.b64decode(os_md['random_seed']) - - return ret - - def test_provisioning_md(self, m_platform, m_sysuuid): - """Provisioning env with a metadata disk should return None.""" - m_platform.return_value = ( - ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh") - self.assertIsNone(ibm.read_md()) - - def test_provisioning_no_metadata(self, m_platform, 
m_sysuuid): - """Provisioning env with no metadata disk should return None.""" - m_platform.return_value = ( - ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None) - self.assertIsNone(ibm.read_md()) - - def test_provisioning_not_ibm(self, m_platform, m_sysuuid): - """Provisioning env but not identified as IBM should return None.""" - m_platform.return_value = (None, None) - self.assertIsNone(ibm.read_md()) - - def test_template_live(self, m_platform, m_sysuuid): - """Template live environment should be identified.""" - tmpdir = self.tmp_dir() - m_platform.return_value = ( - ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir) - m_sysuuid.return_value = self.sysuuid - - test_helpers.populate_dir(tmpdir, { - 'openstack/latest/meta_data.json': json.dumps(self.template_md), - 'openstack/latest/user_data': self.userdata, - 'openstack/content/interfaces': self.content_interfaces, - 'meta.js': self.meta_js}) - - ret = ibm.read_md() - self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, - ret['platform']) - self.assertEqual(tmpdir, ret['source']) - self.assertEqual(self.userdata, ret['userdata']) - self.assertEqual(self._get_expected_metadata(self.template_md), - ret['metadata']) - self.assertEqual(self.sysuuid, ret['system-uuid']) - - def test_os_code_live(self, m_platform, m_sysuuid): - """Verify an os_code metadata path.""" - tmpdir = self.tmp_dir() - m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) - netdata = json.dumps(self.network_data) - test_helpers.populate_dir(tmpdir, { - 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), - 'openstack/latest/user_data': self.userdata, - 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), - 'openstack/latest/network_data.json': netdata, - }) - - ret = ibm.read_md() - self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) - self.assertEqual(tmpdir, ret['source']) - self.assertEqual(self.userdata, ret['userdata']) - self.assertEqual(self._get_expected_metadata(self.oscode_md), - ret['metadata']) - - def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): - """Verify os_code without user-data.""" - tmpdir = self.tmp_dir() - m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) - test_helpers.populate_dir(tmpdir, { - 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), - 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), - }) - - ret = ibm.read_md() - self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) - self.assertEqual(tmpdir, ret['source']) - self.assertIsNone(ret['userdata']) - self.assertEqual(self._get_expected_metadata(self.oscode_md), - ret['metadata']) - - -class TestIsIBMProvisioning(test_helpers.FilesystemMockingTestCase): - """Test the _is_ibm_provisioning method.""" - inst_log = "/root/swinstall.log" - prov_cfg = "/root/provisioningConfiguration.cfg" - boot_ref = "/proc/1/environ" - with_logs = True - - def _call_with_root(self, rootd): - self.reRoot(rootd) - return ibm._is_ibm_provisioning() - - def test_no_config(self): - """No provisioning config means not provisioning.""" - self.assertFalse(self._call_with_root(self.tmp_dir())) - - def test_config_only(self): - """A provisioning config without a log means provisioning.""" - rootd = self.tmp_dir() - test_helpers.populate_dir(rootd, {self.prov_cfg: "key=value"}) - self.assertTrue(self._call_with_root(rootd)) - - def test_config_with_old_log(self): - """A config with a log from previous boot is not provisioning.""" - rootd = self.tmp_dir() - data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), - self.inst_log: ("log 
data\n", -30), - self.boot_ref: ("PWD=/", 0)} - test_helpers.populate_dir_with_ts(rootd, data) - self.assertFalse(self._call_with_root(rootd=rootd)) - self.assertIn("from previous boot", self.logs.getvalue()) - - def test_config_with_new_log(self): - """A config with a log from this boot is provisioning.""" - rootd = self.tmp_dir() - data = {self.prov_cfg: ("key=value\nkey2=val2\n", -10), - self.inst_log: ("log data\n", 30), - self.boot_ref: ("PWD=/", 0)} - test_helpers.populate_dir_with_ts(rootd, data) - self.assertTrue(self._call_with_root(rootd=rootd)) - self.assertIn("from current boot", self.logs.getvalue()) - - def test_config_and_log_no_reference(self): - """If the config and log existed, but no reference, assume not.""" - rootd = self.tmp_dir() - test_helpers.populate_dir( - rootd, {self.prov_cfg: "key=value", self.inst_log: "log data\n"}) - self.assertFalse(self._call_with_root(rootd=rootd)) - self.assertIn("no reference file", self.logs.getvalue()) - - -class TestDataSourceIBMCloud(test_helpers.CiTestCase): - - def setUp(self): - super(TestDataSourceIBMCloud, self).setUp() - self.tmp = self.tmp_dir() - self.cloud_dir = self.tmp_path('cloud', dir=self.tmp) - util.ensure_dir(self.cloud_dir) - paths = Paths({'run_dir': self.tmp, 'cloud_dir': self.cloud_dir}) - self.ds = ibm.DataSourceIBMCloud( - sys_cfg={}, distro=None, paths=paths) - - def test_get_data_false(self): - """When read_md returns None, get_data returns False.""" - with mock.patch(D_PATH + 'read_md', return_value=None): - self.assertFalse(self.ds.get_data()) - - def test_get_data_processes_read_md(self): - """get_data processes and caches content returned by read_md.""" - md = { - 'metadata': {}, 'networkdata': 'net', 'platform': 'plat', - 'source': 'src', 'system-uuid': 'uuid', 'userdata': 'ud', - 'vendordata': 'vd'} - with mock.patch(D_PATH + 'read_md', return_value=md): - self.assertTrue(self.ds.get_data()) - self.assertEqual('src', self.ds.source) - self.assertEqual('plat', self.ds.platform) - self.assertEqual({}, self.ds.metadata) - self.assertEqual('ud', self.ds.userdata_raw) - self.assertEqual('net', self.ds.network_json) - self.assertEqual('vd', self.ds.vendordata_pure) - self.assertEqual('uuid', self.ds.system_uuid) - self.assertEqual('ibmcloud', self.ds.cloud_name) - self.assertEqual('ibmcloud', self.ds.platform_type) - self.assertEqual('plat (src)', self.ds.subplatform) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py deleted file mode 100644 index 41b6c27b..00000000 --- a/tests/unittests/test_datasource/test_maas.py +++ /dev/null @@ -1,200 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from copy import copy -import os -import shutil -import tempfile -import yaml -from unittest import mock - -from cloudinit.sources import DataSourceMAAS -from cloudinit import url_helper -from cloudinit.tests.helpers import CiTestCase, populate_dir - - -class TestMAASDataSource(CiTestCase): - - def setUp(self): - super(TestMAASDataSource, self).setUp() - # Make a temp directoy for tests to use. 
- self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def test_seed_dir_valid(self): - """Verify a valid seeddir is read as such.""" - - userdata = b'valid01-userdata' - data = {'meta-data/instance-id': 'i-valid01', - 'meta-data/local-hostname': 'valid01-hostname', - 'user-data': userdata, - 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'} - - my_d = os.path.join(self.tmp, "valid") - populate_dir(my_d, data) - - ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d) - - self.assertEqual(userdata, ud) - for key in ('instance-id', 'local-hostname'): - self.assertEqual(data["meta-data/" + key], md[key]) - - # verify that 'userdata' is not returned as part of the metadata - self.assertFalse(('user-data' in md)) - self.assertIsNone(vd) - - def test_seed_dir_valid_extra(self): - """Verify extra files do not affect seed_dir validity.""" - - userdata = b'valid-extra-userdata' - data = {'meta-data/instance-id': 'i-valid-extra', - 'meta-data/local-hostname': 'valid-extra-hostname', - 'user-data': userdata, 'foo': 'bar'} - - my_d = os.path.join(self.tmp, "valid_extra") - populate_dir(my_d, data) - - ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d) - - self.assertEqual(userdata, ud) - for key in ('instance-id', 'local-hostname'): - self.assertEqual(data['meta-data/' + key], md[key]) - - # additional files should not just appear as keys in metadata atm - self.assertFalse(('foo' in md)) - - def test_seed_dir_invalid(self): - """Verify that invalid seed_dir raises MAASSeedDirMalformed.""" - - valid = {'instance-id': 'i-instanceid', - 'local-hostname': 'test-hostname', 'user-data': ''} - - my_based = os.path.join(self.tmp, "valid_extra") - - # missing 'userdata' file - my_d = "%s-01" % my_based - invalid_data = copy(valid) - del invalid_data['local-hostname'] - populate_dir(my_d, invalid_data) - self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed, - DataSourceMAAS.read_maas_seed_dir, my_d) - - # missing 'instance-id' - my_d = "%s-02" % my_based - invalid_data = copy(valid) - del invalid_data['instance-id'] - populate_dir(my_d, invalid_data) - self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed, - DataSourceMAAS.read_maas_seed_dir, my_d) - - def test_seed_dir_none(self): - """Verify that empty seed_dir raises MAASSeedDirNone.""" - - my_d = os.path.join(self.tmp, "valid_empty") - self.assertRaises(DataSourceMAAS.MAASSeedDirNone, - DataSourceMAAS.read_maas_seed_dir, my_d) - - def test_seed_dir_missing(self): - """Verify that missing seed_dir raises MAASSeedDirNone.""" - self.assertRaises(DataSourceMAAS.MAASSeedDirNone, - DataSourceMAAS.read_maas_seed_dir, - os.path.join(self.tmp, "nonexistantdirectory")) - - def mock_read_maas_seed_url(self, data, seed, version="19991231"): - """mock up readurl to appear as a web server at seed has provided data. - return what read_maas_seed_url returns.""" - def my_readurl(*args, **kwargs): - if len(args): - url = args[0] - else: - url = kwargs['url'] - prefix = "%s/%s/" % (seed, version) - if not url.startswith(prefix): - raise ValueError("unexpected call %s" % url) - - short = url[len(prefix):] - if short not in data: - raise url_helper.UrlError("not found", code=404, url=url) - return url_helper.StringResponse(data[short]) - - # Now do the actual call of the code under test. 
- with mock.patch("cloudinit.url_helper.readurl") as mock_readurl: - mock_readurl.side_effect = my_readurl - return DataSourceMAAS.read_maas_seed_url(seed, version=version) - - def test_seed_url_valid(self): - """Verify that valid seed_url is read as such.""" - valid = { - 'meta-data/instance-id': 'i-instanceid', - 'meta-data/local-hostname': 'test-hostname', - 'meta-data/public-keys': 'test-hostname', - 'meta-data/vendor-data': b'my-vendordata', - 'user-data': b'foodata', - } - my_seed = "http://example.com/xmeta" - my_ver = "1999-99-99" - ud, md, vd = self.mock_read_maas_seed_url(valid, my_seed, my_ver) - - self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) - self.assertEqual( - valid['meta-data/local-hostname'], md['local-hostname']) - self.assertEqual(valid['meta-data/public-keys'], md['public-keys']) - self.assertEqual(valid['user-data'], ud) - # vendor-data is yaml, which decodes a string - self.assertEqual(valid['meta-data/vendor-data'].decode(), vd) - - def test_seed_url_vendor_data_dict(self): - expected_vd = {'key1': 'value1'} - valid = { - 'meta-data/instance-id': 'i-instanceid', - 'meta-data/local-hostname': 'test-hostname', - 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(), - } - _ud, md, vd = self.mock_read_maas_seed_url( - valid, "http://example.com/foo") - - self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) - self.assertEqual(expected_vd, vd) - - -@mock.patch("cloudinit.sources.DataSourceMAAS.url_helper.OauthUrlHelper") -class TestGetOauthHelper(CiTestCase): - base_cfg = {'consumer_key': 'FAKE_CONSUMER_KEY', - 'token_key': 'FAKE_TOKEN_KEY', - 'token_secret': 'FAKE_TOKEN_SECRET', - 'consumer_secret': None} - - def test_all_required(self, m_helper): - """Valid config as expected.""" - DataSourceMAAS.get_oauth_helper(self.base_cfg.copy()) - m_helper.assert_has_calls([mock.call(**self.base_cfg)]) - - def test_other_fields_not_passed_through(self, m_helper): - """Only relevant fields are passed through.""" - mycfg = self.base_cfg.copy() - mycfg['unrelated_field'] = 'unrelated' - DataSourceMAAS.get_oauth_helper(mycfg) - m_helper.assert_has_calls([mock.call(**self.base_cfg)]) - - -class TestGetIdHash(CiTestCase): - v1_cfg = {'consumer_key': 'CKEY', 'token_key': 'TKEY', - 'token_secret': 'TSEC'} - v1_id = ( - 'v1:' - '403ee5f19c956507f1d0e50814119c405902137ea4f8838bde167c5da8110392') - - def test_v1_expected(self): - """Test v1 id generated as expected working behavior from config.""" - result = DataSourceMAAS.get_id_from_ds_cfg(self.v1_cfg.copy()) - self.assertEqual(self.v1_id, result) - - def test_v1_extra_fields_are_ignored(self): - """Test v1 id ignores unused entries in config.""" - cfg = self.v1_cfg.copy() - cfg['consumer_secret'] = "BOO" - cfg['unrelated'] = "HI MOM" - result = DataSourceMAAS.get_id_from_ds_cfg(cfg) - self.assertEqual(self.v1_id, result) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py deleted file mode 100644 index 02cc9b38..00000000 --- a/tests/unittests/test_datasource/test_nocloud.py +++ /dev/null @@ -1,393 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
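The MAAS seed-dir tests above encode a three-way contract: a missing or empty directory raises MAASSeedDirNone, a directory lacking a required meta-data key raises MAASSeedDirMalformed, and unrelated extra files are simply ignored. A sketch of just that validation step, assuming the seed keys live as individual files under meta-data/ the way populate_dir lays them out (a hypothetical helper; the real reader also loads user-data and vendor-data):

    import os

    REQUIRED_KEYS = ('instance-id', 'local-hostname')

    class SeedDirNone(Exception):
        pass

    class SeedDirMalformed(Exception):
        pass

    def validate_maas_seed_dir(seed_dir):
        # Absent or empty directory: nothing that even looks like a seed.
        if not os.path.isdir(seed_dir) or not os.listdir(seed_dir):
            raise SeedDirNone('%s: not a populated directory' % seed_dir)
        # Present but missing a mandatory key: malformed, not merely empty.
        for key in REQUIRED_KEYS:
            if not os.path.isfile(os.path.join(seed_dir, 'meta-data', key)):
                raise SeedDirMalformed(
                    '%s: missing meta-data/%s' % (seed_dir, key))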
- -from cloudinit import dmi -from cloudinit import helpers -from cloudinit.sources.DataSourceNoCloud import ( - DataSourceNoCloud as dsNoCloud, - _maybe_remove_top_network, - parse_cmdline_data) -from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack - -import os -import textwrap -import yaml - - -@mock.patch('cloudinit.sources.DataSourceNoCloud.util.is_lxd') -class TestNoCloudDataSource(CiTestCase): - - def setUp(self): - super(TestNoCloudDataSource, self).setUp() - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - - self.cmdline = "root=TESTCMDLINE" - - self.mocks = ExitStack() - self.addCleanup(self.mocks.close) - - self.mocks.enter_context( - mock.patch.object(util, 'get_cmdline', return_value=self.cmdline)) - self.mocks.enter_context( - mock.patch.object(dmi, 'read_dmi_data', return_value=None)) - - def _test_fs_config_is_read(self, fs_label, fs_label_to_search): - vfat_device = 'device-1' - - def m_mount_cb(device, callback, mtype): - if (device == vfat_device): - return {'meta-data': yaml.dump({'instance-id': 'IID'})} - else: - return {} - - def m_find_devs_with(query='', path=''): - if 'TYPE=vfat' == query: - return [vfat_device] - elif 'LABEL={}'.format(fs_label) == query: - return [vfat_device] - else: - return [] - - self.mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - side_effect=m_find_devs_with)) - self.mocks.enter_context( - mock.patch.object(util, 'mount_cb', - side_effect=m_mount_cb)) - sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - - self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') - self.assertTrue(ret) - - def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): - md = {'instance-id': 'IID', 'dsmode': 'local'} - ud = b"USER_DATA_HERE" - seed_dir = os.path.join(self.paths.seed_dir, "nocloud") - populate_dir(seed_dir, - {'user-data': ud, 'meta-data': yaml.safe_dump(md)}) - - sys_cfg = { - 'datasource': {'NoCloud': {'fs_label': None}} - } - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertEqual(dsrc.userdata_raw, ud) - self.assertEqual(dsrc.metadata, md) - self.assertEqual(dsrc.platform_type, 'lxd') - self.assertEqual( - dsrc.subplatform, 'seed-dir (%s)' % seed_dir) - self.assertTrue(ret) - - def test_nocloud_seed_dir_non_lxd_platform_is_nocloud(self, m_is_lxd): - """Non-lxd environments will list nocloud as the platform.""" - m_is_lxd.return_value = False - md = {'instance-id': 'IID', 'dsmode': 'local'} - seed_dir = os.path.join(self.paths.seed_dir, "nocloud") - populate_dir(seed_dir, - {'user-data': '', 'meta-data': yaml.safe_dump(md)}) - - sys_cfg = { - 'datasource': {'NoCloud': {'fs_label': None}} - } - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - self.assertTrue(dsrc.get_data()) - self.assertEqual(dsrc.platform_type, 'nocloud') - self.assertEqual( - dsrc.subplatform, 'seed-dir (%s)' % seed_dir) - - def test_fs_label(self, m_is_lxd): - # find_devs_with should not be called ff fs_label is None - class PsuedoException(Exception): - pass - - self.mocks.enter_context( - mock.patch.object(util, 'find_devs_with', - side_effect=PsuedoException)) - - # by default, NoCloud should search for filesystems by label - sys_cfg = {'datasource': {'NoCloud': {}}} - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - self.assertRaises(PsuedoException, 
dsrc.get_data) - - # but disabling searching should just end up with None found - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertFalse(ret) - - def test_fs_config_lowercase_label(self, m_is_lxd): - self._test_fs_config_is_read('cidata', 'cidata') - - def test_fs_config_uppercase_label(self, m_is_lxd): - self._test_fs_config_is_read('CIDATA', 'cidata') - - def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): - self._test_fs_config_is_read('cidata', 'CIDATA') - - def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): - self._test_fs_config_is_read('CIDATA', 'CIDATA') - - def test_no_datasource_expected(self, m_is_lxd): - # no source should be found if no cmdline, config, and fs_label=None - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - self.assertFalse(dsrc.get_data()) - - def test_seed_in_config(self, m_is_lxd): - data = { - 'fs_label': None, - 'meta-data': yaml.safe_dump({'instance-id': 'IID'}), - 'user-data': b"USER_DATA_RAW", - } - - sys_cfg = {'datasource': {'NoCloud': data}} - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW") - self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') - self.assertTrue(ret) - - def test_nocloud_seed_with_vendordata(self, m_is_lxd): - md = {'instance-id': 'IID', 'dsmode': 'local'} - ud = b"USER_DATA_HERE" - vd = b"THIS IS MY VENDOR_DATA" - - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': ud, 'meta-data': yaml.safe_dump(md), - 'vendor-data': vd}) - - sys_cfg = { - 'datasource': {'NoCloud': {'fs_label': None}} - } - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertEqual(dsrc.userdata_raw, ud) - self.assertEqual(dsrc.metadata, md) - self.assertEqual(dsrc.vendordata_raw, vd) - self.assertTrue(ret) - - def test_nocloud_no_vendordata(self, m_is_lxd): - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertEqual(dsrc.userdata_raw, b"ud") - self.assertFalse(dsrc.vendordata) - self.assertTrue(ret) - - def test_metadata_network_interfaces(self, m_is_lxd): - gateway = "103.225.10.1" - md = { - 'instance-id': 'i-abcd', - 'local-hostname': 'hostname1', - 'network-interfaces': textwrap.dedent("""\ - auto eth0 - iface eth0 inet static - hwaddr 00:16:3e:70:e1:04 - address 103.225.10.12 - netmask 255.255.255.0 - gateway """ + gateway + """ - dns-servers 8.8.8.8""")} - - populate_dir( - os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': yaml.dump(md) + "\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - # very simple check just for the strings above - self.assertIn(gateway, str(dsrc.network_config)) - - def test_metadata_network_config(self, m_is_lxd): - # network-config needs to get into network_config - netconf = {'version': 1, - 'config': [{'type': 'physical', 'name': 'interface0', - 'subnets': [{'type': 'dhcp'}]}]} - populate_dir( - os.path.join(self.paths.seed_dir, 
"nocloud"), - {'user-data': b"ud", - 'meta-data': "instance-id: IID\n", - 'network-config': yaml.dump(netconf) + "\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(netconf, dsrc.network_config) - - def test_metadata_network_config_with_toplevel_network(self, m_is_lxd): - """network-config may have 'network' top level key.""" - netconf = {'config': 'disabled'} - populate_dir( - os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': "instance-id: IID\n", - 'network-config': yaml.dump({'network': netconf}) + "\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(netconf, dsrc.network_config) - - def test_metadata_network_config_over_interfaces(self, m_is_lxd): - # network-config should override meta-data/network-interfaces - gateway = "103.225.10.1" - md = { - 'instance-id': 'i-abcd', - 'local-hostname': 'hostname1', - 'network-interfaces': textwrap.dedent("""\ - auto eth0 - iface eth0 inet static - hwaddr 00:16:3e:70:e1:04 - address 103.225.10.12 - netmask 255.255.255.0 - gateway """ + gateway + """ - dns-servers 8.8.8.8""")} - - netconf = {'version': 1, - 'config': [{'type': 'physical', 'name': 'interface0', - 'subnets': [{'type': 'dhcp'}]}]} - populate_dir( - os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", - 'meta-data': yaml.dump(md) + "\n", - 'network-config': yaml.dump(netconf) + "\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(netconf, dsrc.network_config) - self.assertNotIn(gateway, str(dsrc.network_config)) - - @mock.patch("cloudinit.util.blkid") - def test_nocloud_get_devices_freebsd(self, m_is_lxd, fake_blkid): - populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) - - sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - - self.mocks.enter_context( - mock.patch.object(util, 'is_FreeBSD', return_value=True)) - - def _mfind_devs_with_freebsd( - criteria=None, oformat='device', - tag=None, no_cache=False, path=None): - if not criteria: - return ["/dev/msdosfs/foo", "/dev/iso9660/foo"] - if criteria.startswith("LABEL="): - return ["/dev/msdosfs/foo", "/dev/iso9660/foo"] - elif criteria == "TYPE=vfat": - return ["/dev/msdosfs/foo"] - elif criteria == "TYPE=iso9660": - return ["/dev/iso9660/foo"] - return [] - - self.mocks.enter_context( - mock.patch.object( - util, 'find_devs_with_freebsd', - side_effect=_mfind_devs_with_freebsd)) - - dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) - ret = dsrc._get_devices('foo') - self.assertEqual(['/dev/msdosfs/foo', '/dev/iso9660/foo'], ret) - fake_blkid.assert_not_called() - - -class TestParseCommandLineData(CiTestCase): - - def test_parse_cmdline_data_valid(self): - ds_id = "ds=nocloud" - pairs = ( - ("root=/dev/sda1 %(ds_id)s", {}), - ("%(ds_id)s; root=/dev/foo", {}), - ("%(ds_id)s", {}), - ("%(ds_id)s;", {}), - ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}), - ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost", - {'seedfrom': 'SEED', 'local-hostname': 'xhost'}), - ("%(ds_id)s;h=xhost", - {'local-hostname': 'xhost'}), - ("%(ds_id)s;h=xhost;i=IID", - {'local-hostname': 
'xhost', 'instance-id': 'IID'}), - ) - - for (fmt, expected) in pairs: - fill = {} - cmdline = fmt % {'ds_id': ds_id} - ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) - self.assertEqual(expected, fill) - self.assertTrue(ret) - - def test_parse_cmdline_data_none(self): - ds_id = "ds=foo" - cmdlines = ( - "root=/dev/sda1 ro", - "console=/dev/ttyS0 root=/dev/foo", - "", - "ds=foocloud", - "ds=foo-net", - "ds=nocloud;s=SEED", - ) - - for cmdline in cmdlines: - fill = {} - ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) - self.assertEqual(fill, {}) - self.assertFalse(ret) - - -class TestMaybeRemoveToplevelNetwork(CiTestCase): - """test _maybe_remove_top_network function.""" - basecfg = [{'type': 'physical', 'name': 'interface0', - 'subnets': [{'type': 'dhcp'}]}] - - def test_should_remove_safely(self): - mcfg = {'config': self.basecfg, 'version': 1} - self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) - - def test_no_remove_if_other_keys(self): - """should not shift if other keys at top level.""" - mcfg = {'network': {'config': self.basecfg, 'version': 1}, - 'unknown_keyname': 'keyval'} - self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) - - def test_no_remove_if_non_dict(self): - """should not shift if not a dict.""" - mcfg = {'network': '"content here'} - self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) - - def test_no_remove_if_missing_config_or_version(self): - """should not shift unless network entry has config and version.""" - mcfg = {'network': {'config': self.basecfg}} - self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) - - mcfg = {'network': {'version': 1}} - self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) - - def test_remove_with_config_disabled(self): - """network/config=disabled should be shifted.""" - mcfg = {'config': 'disabled'} - self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py deleted file mode 100644 index 283b65c2..00000000 --- a/tests/unittests/test_datasource/test_opennebula.py +++ /dev/null @@ -1,977 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
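The test_parse_cmdline_data pairs above double as a spec for the kernel-cmdline hook: the exact ds token (here ds=nocloud) must appear either bare or followed by ';'-separated key=value pairs; the short aliases s, h and i expand to seedfrom, local-hostname and instance-id; and near-miss tokens such as ds=foocloud for ds_id ds=foo never match. A condensed parser that satisfies those pairs (an illustrative sketch, not the shipped parse_cmdline_data):

    ALIASES = {'s': 'seedfrom', 'h': 'local-hostname', 'i': 'instance-id'}

    def parse_cmdline(ds_id, cmdline, fill):
        """Populate 'fill' from cmdline; return True only on a ds match."""
        hits = [tok for tok in cmdline.split()
                if tok == ds_id or tok.startswith(ds_id + ';')]
        if not hits:
            return False
        for pair in hits[0].split(';')[1:]:
            if not pair:
                continue  # a trailing ';' yields an empty pair
            key, _, value = pair.partition('=')
            fill[ALIASES.get(key, key)] = value
        return True

So parse_cmdline('ds=nocloud', 'ds=nocloud;h=xhost;i=IID', fill) returns True and leaves fill == {'local-hostname': 'xhost', 'instance-id': 'IID'}, matching the final pair in the table.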
- -from cloudinit import helpers -from cloudinit.sources import DataSourceOpenNebula as ds -from cloudinit import util -from cloudinit.tests.helpers import mock, populate_dir, CiTestCase - -import os -import pwd -import unittest - -import pytest - - -TEST_VARS = { - 'VAR1': 'single', - 'VAR2': 'double word', - 'VAR3': 'multi\nline\n', - 'VAR4': "'single'", - 'VAR5': "'double word'", - 'VAR6': "'multi\nline\n'", - 'VAR7': 'single\\t', - 'VAR8': 'double\\tword', - 'VAR9': 'multi\\t\nline\n', - 'VAR10': '\\', # expect '\' - 'VAR11': '\'', # expect ' - 'VAR12': '$', # expect $ -} - -INVALID_CONTEXT = ';' -USER_DATA = '#cloud-config\napt_upgrade: true' -SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' -HOSTNAME = 'foo.example.com' -PUBLIC_IP = '10.0.0.3' -MACADDR = '02:00:0a:12:01:01' -IP_BY_MACADDR = '10.18.1.1' -IP4_PREFIX = '24' -IP6_GLOBAL = '2001:db8:1:0:400:c0ff:fea8:1ba' -IP6_ULA = 'fd01:dead:beaf:0:400:c0ff:fea8:1ba' -IP6_GW = '2001:db8:1::ffff' -IP6_PREFIX = '48' - -DS_PATH = "cloudinit.sources.DataSourceOpenNebula" - - -class TestOpenNebulaDataSource(CiTestCase): - parsed_user = None - allowed_subp = ['bash'] - - def setUp(self): - super(TestOpenNebulaDataSource, self).setUp() - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - - # defaults for few tests - self.ds = ds.DataSourceOpenNebula - self.seed_dir = os.path.join(self.paths.seed_dir, "opennebula") - self.sys_cfg = {'datasource': {'OpenNebula': {'dsmode': 'local'}}} - - # we don't want 'sudo' called in tests. so we patch switch_user_cmd - def my_switch_user_cmd(user): - self.parsed_user = user - return [] - - self.switch_user_cmd_real = ds.switch_user_cmd - ds.switch_user_cmd = my_switch_user_cmd - - def tearDown(self): - ds.switch_user_cmd = self.switch_user_cmd_real - super(TestOpenNebulaDataSource, self).tearDown() - - def test_get_data_non_contextdisk(self): - orig_find_devs_with = util.find_devs_with - try: - # dont' try to lookup for CDs - util.find_devs_with = lambda n: [] - dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertFalse(ret) - finally: - util.find_devs_with = orig_find_devs_with - - def test_get_data_broken_contextdisk(self): - orig_find_devs_with = util.find_devs_with - try: - # dont' try to lookup for CDs - util.find_devs_with = lambda n: [] - populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT}) - dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) - self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) - finally: - util.find_devs_with = orig_find_devs_with - - def test_get_data_invalid_identity(self): - orig_find_devs_with = util.find_devs_with - try: - # generate non-existing system user name - sys_cfg = self.sys_cfg - invalid_user = 'invalid' - while not sys_cfg['datasource']['OpenNebula'].get('parseuser'): - try: - pwd.getpwnam(invalid_user) - invalid_user += 'X' - except KeyError: - sys_cfg['datasource']['OpenNebula']['parseuser'] = \ - invalid_user - - # dont' try to lookup for CDs - util.find_devs_with = lambda n: [] - populate_context_dir(self.seed_dir, {'KEY1': 'val1'}) - dsrc = self.ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) - self.assertRaises(ds.BrokenContextDiskDir, dsrc.get_data) - finally: - util.find_devs_with = orig_find_devs_with - - def test_get_data(self): - orig_find_devs_with = util.find_devs_with - try: - # dont' try to lookup for CDs - util.find_devs_with = lambda n: [] - populate_context_dir(self.seed_dir, 
{'KEY1': 'val1'}) - dsrc = self.ds(sys_cfg=self.sys_cfg, distro=None, paths=self.paths) - ret = dsrc.get_data() - self.assertTrue(ret) - finally: - util.find_devs_with = orig_find_devs_with - self.assertEqual('opennebula', dsrc.cloud_name) - self.assertEqual('opennebula', dsrc.platform_type) - self.assertEqual( - 'seed-dir (%s/seed/opennebula)' % self.tmp, dsrc.subplatform) - - def test_seed_dir_non_contextdisk(self): - self.assertRaises(ds.NonContextDiskDir, ds.read_context_disk_dir, - self.seed_dir, mock.Mock()) - - def test_seed_dir_empty1_context(self): - populate_dir(self.seed_dir, {'context.sh': ''}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertIsNone(results['userdata']) - self.assertEqual(results['metadata'], {}) - - def test_seed_dir_empty2_context(self): - populate_context_dir(self.seed_dir, {}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertIsNone(results['userdata']) - self.assertEqual(results['metadata'], {}) - - def test_seed_dir_broken_context(self): - populate_dir(self.seed_dir, {'context.sh': INVALID_CONTEXT}) - - self.assertRaises(ds.BrokenContextDiskDir, - ds.read_context_disk_dir, - self.seed_dir, mock.Mock()) - - def test_context_parser(self): - populate_context_dir(self.seed_dir, TEST_VARS) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('metadata' in results) - self.assertEqual(TEST_VARS, results['metadata']) - - def test_ssh_key(self): - public_keys = ['first key', 'second key'] - for c in range(4): - for k in ('SSH_KEY', 'SSH_PUBLIC_KEY'): - my_d = os.path.join(self.tmp, "%s-%i" % (k, c)) - populate_context_dir(my_d, {k: '\n'.join(public_keys)}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('metadata' in results) - self.assertTrue('public-keys' in results['metadata']) - self.assertEqual(public_keys, - results['metadata']['public-keys']) - - public_keys.append(SSH_KEY % (c + 1,)) - - def test_user_data_plain(self): - for k in ('USER_DATA', 'USERDATA'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: USER_DATA, - 'USERDATA_ENCODING': ''}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('userdata' in results) - self.assertEqual(USER_DATA, results['userdata']) - - def test_user_data_encoding_required_for_decode(self): - b64userdata = util.b64e(USER_DATA) - for k in ('USER_DATA', 'USERDATA'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: b64userdata}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('userdata' in results) - self.assertEqual(b64userdata, results['userdata']) - - def test_user_data_base64_encoding(self): - for k in ('USER_DATA', 'USERDATA'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: util.b64e(USER_DATA), - 'USERDATA_ENCODING': 'base64'}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('userdata' in results) - self.assertEqual(USER_DATA, results['userdata']) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_hostname(self, m_get_phys_by_mac): - for dev in ('eth0', 'ens3'): - m_get_phys_by_mac.return_value = {MACADDR: dev} - for k in ('SET_HOSTNAME', 'HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', - 'ETH0_IP'): - my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: PUBLIC_IP}) - results = ds.read_context_disk_dir(my_d, mock.Mock()) - - self.assertTrue('metadata' in results) - self.assertTrue('local-hostname' in results['metadata']) - self.assertEqual( - 
PUBLIC_IP, results['metadata']['local-hostname']) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_network_interfaces(self, m_get_phys_by_mac): - for dev in ('eth0', 'ens3'): - m_get_phys_by_mac.return_value = {MACADDR: dev} - - # without ETH0_MAC - # for Older OpenNebula? - populate_context_dir(self.seed_dir, {'ETH0_IP': IP_BY_MACADDR}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP and ETH0_MAC - populate_context_dir( - self.seed_dir, {'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP with empty string and ETH0_MAC - # in the case of using Virtual Network contains - # "AR = [ TYPE = ETHER ]" - populate_context_dir( - self.seed_dir, {'ETH0_IP': '', 'ETH0_MAC': MACADDR}) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_MASK - populate_context_dir( - self.seed_dir, { - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_MAC': MACADDR, - 'ETH0_MASK': '255.255.0.0' - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/16' in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_MASK with empty string - populate_context_dir( - self.seed_dir, { - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_MAC': MACADDR, - 'ETH0_MASK': '' - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP_BY_MACADDR + '/' + IP4_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP6 - populate_context_dir( - self.seed_dir, { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_MAC': MACADDR, - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP6_GLOBAL + '/64' in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP6_ULA - populate_context_dir( - self.seed_dir, { - 'ETH0_IP6_ULA': IP6_ULA, - 'ETH0_MAC': MACADDR, - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP6_ULA + '/64' in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH - populate_context_dir( - self.seed_dir, { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, - 'ETH0_MAC': MACADDR, - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - self.assertTrue( - IP6_GLOBAL + '/' + IP6_PREFIX in - results['network-interfaces']['ethernets'][dev]['addresses']) - - # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string - populate_context_dir( - self.seed_dir, { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': '', - 'ETH0_MAC': MACADDR, - }) - results = ds.read_context_disk_dir(self.seed_dir, mock.Mock()) - - self.assertTrue('network-interfaces' in results) - 
self.assertTrue( - IP6_GLOBAL + '/64' in - results['network-interfaces']['ethernets'][dev]['addresses']) - - def test_find_candidates(self): - def my_devs_with(criteria): - return { - "LABEL=CONTEXT": ["/dev/sdb"], - "LABEL=CDROM": ["/dev/sr0"], - "TYPE=iso9660": ["/dev/vdb"], - }.get(criteria, []) - - orig_find_devs_with = util.find_devs_with - try: - util.find_devs_with = my_devs_with - self.assertEqual(["/dev/sdb", "/dev/sr0", "/dev/vdb"], - ds.find_candidate_devs()) - finally: - util.find_devs_with = orig_find_devs_with - - -@mock.patch(DS_PATH + '.net.get_interfaces_by_mac', mock.Mock(return_value={})) -class TestOpenNebulaNetwork(unittest.TestCase): - - system_nics = ('eth0', 'ens3') - - def test_context_devname(self): - """Verify context_devname correctly returns mac and name.""" - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH1_MAC': '02:00:0a:12:0f:0f', } - expected = { - '02:00:0a:12:01:01': 'ETH0', - '02:00:0a:12:0f:0f': 'ETH1', } - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(expected, net.context_devname) - - def test_get_nameservers(self): - """ - Verify get_nameservers('device') correctly returns DNS server addresses - and search domains. - """ - context = { - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } - expected = { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com', 'example.org']} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_nameservers('eth0') - self.assertEqual(expected, val) - - def test_get_mtu(self): - """Verify get_mtu('device') correctly returns MTU size.""" - context = {'ETH0_MTU': '1280'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_mtu('eth0') - self.assertEqual('1280', val) - - def test_get_ip(self): - """Verify get_ip('device') correctly returns IPv4 address.""" - context = {'ETH0_IP': PUBLIC_IP} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip('eth0', MACADDR) - self.assertEqual(PUBLIC_IP, val) - - def test_get_ip_emptystring(self): - """ - Verify get_ip('device') correctly returns IPv4 address. - It returns IP address created by MAC address if ETH0_IP has empty - string. - """ - context = {'ETH0_IP': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip('eth0', MACADDR) - self.assertEqual(IP_BY_MACADDR, val) - - def test_get_ip6(self): - """ - Verify get_ip6('device') correctly returns IPv6 address. - In this case, IPv6 address is Given by ETH0_IP6. - """ - context = { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_ULA': '', } - expected = [IP6_GLOBAL] - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6('eth0') - self.assertEqual(expected, val) - - def test_get_ip6_ula(self): - """ - Verify get_ip6('device') correctly returns IPv6 address. - In this case, IPv6 address is Given by ETH0_IP6_ULA. - """ - context = { - 'ETH0_IP6': '', - 'ETH0_IP6_ULA': IP6_ULA, } - expected = [IP6_ULA] - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6('eth0') - self.assertEqual(expected, val) - - def test_get_ip6_dual(self): - """ - Verify get_ip6('device') correctly returns IPv6 address. - In this case, IPv6 addresses are Given by ETH0_IP6 and ETH0_IP6_ULA. 
- """ - context = { - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_ULA': IP6_ULA, } - expected = [IP6_GLOBAL, IP6_ULA] - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6('eth0') - self.assertEqual(expected, val) - - def test_get_ip6_prefix(self): - """ - Verify get_ip6_prefix('device') correctly returns IPv6 prefix. - """ - context = {'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6_prefix('eth0') - self.assertEqual(IP6_PREFIX, val) - - def test_get_ip6_prefix_emptystring(self): - """ - Verify get_ip6_prefix('device') correctly returns IPv6 prefix. - It returns default value '64' if ETH0_IP6_PREFIX_LENGTH has empty - string. - """ - context = {'ETH0_IP6_PREFIX_LENGTH': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_ip6_prefix('eth0') - self.assertEqual('64', val) - - def test_get_gateway(self): - """ - Verify get_gateway('device') correctly returns IPv4 default gateway - address. - """ - context = {'ETH0_GATEWAY': '1.2.3.5'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_gateway('eth0') - self.assertEqual('1.2.3.5', val) - - def test_get_gateway6(self): - """ - Verify get_gateway6('device') correctly returns IPv6 default gateway - address. - """ - for k in ('GATEWAY6', 'IP6_GATEWAY'): - context = {'ETH0_' + k: IP6_GW} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_gateway6('eth0') - self.assertEqual(IP6_GW, val) - - def test_get_mask(self): - """ - Verify get_mask('device') correctly returns IPv4 subnet mask. - """ - context = {'ETH0_MASK': '255.255.0.0'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_mask('eth0') - self.assertEqual('255.255.0.0', val) - - def test_get_mask_emptystring(self): - """ - Verify get_mask('device') correctly returns IPv4 subnet mask. - It returns default value '255.255.255.0' if ETH0_MASK has empty string. - """ - context = {'ETH0_MASK': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_mask('eth0') - self.assertEqual('255.255.255.0', val) - - def test_get_network(self): - """ - Verify get_network('device') correctly returns IPv4 network address. - """ - context = {'ETH0_NETWORK': '1.2.3.0'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_network('eth0', MACADDR) - self.assertEqual('1.2.3.0', val) - - def test_get_network_emptystring(self): - """ - Verify get_network('device') correctly returns IPv4 network address. - It returns network address created by MAC address if ETH0_NETWORK has - empty string. - """ - context = {'ETH0_NETWORK': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_network('eth0', MACADDR) - self.assertEqual('10.18.1.0', val) - - def test_get_field(self): - """ - Verify get_field('device', 'name') returns *context* value. - """ - context = {'ETH9_DUMMY': 'DUMMY_VALUE'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy') - self.assertEqual('DUMMY_VALUE', val) - - def test_get_field_withdefaultvalue(self): - """ - Verify get_field('device', 'name', 'default value') returns *context* - value. - """ - context = {'ETH9_DUMMY': 'DUMMY_VALUE'} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') - self.assertEqual('DUMMY_VALUE', val) - - def test_get_field_withdefaultvalue_emptycontext(self): - """ - Verify get_field('device', 'name', 'default value') returns *default* - value if context value is empty string. 
- """ - context = {'ETH9_DUMMY': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy', 'DEFAULT_VALUE') - self.assertEqual('DEFAULT_VALUE', val) - - def test_get_field_emptycontext(self): - """ - Verify get_field('device', 'name') returns None if context value is - empty string. - """ - context = {'ETH9_DUMMY': ''} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy') - self.assertEqual(None, val) - - def test_get_field_nonecontext(self): - """ - Verify get_field('device', 'name') returns None if context value is - None. - """ - context = {'ETH9_DUMMY': None} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - val = net.get_field('eth9', 'dummy') - self.assertEqual(None, val) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_gateway(self, m_get_phys_by_mac): - """Test rendering with/without IPv4 gateway""" - self.maxDiff = None - # empty ETH0_GATEWAY - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # set ETH0_GATEWAY - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY': '1.2.3.5', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'gateway4': '1.2.3.5', - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_gateway6(self, m_get_phys_by_mac): - """Test rendering with/without IPv6 gateway""" - self.maxDiff = None - # empty ETH0_GATEWAY6 - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY6': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # set ETH0_GATEWAY6 - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_GATEWAY6': IP6_GW, } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'gateway6': IP6_GW, - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_ipv6address(self, m_get_phys_by_mac): - """Test rendering with/without IPv6 address""" - self.maxDiff = None - # empty ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_IP6': '', - 'ETH0_IP6_ULA': '', - 'ETH0_IP6_PREFIX_LENGTH': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # 
set ETH0_IP6, ETH0_IP6_ULA, ETH0_IP6_PREFIX_LENGTH - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, - 'ETH0_IP6_ULA': IP6_ULA, } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [ - IP_BY_MACADDR + '/' + IP4_PREFIX, - IP6_GLOBAL + '/' + IP6_PREFIX, - IP6_ULA + '/' + IP6_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_dns(self, m_get_phys_by_mac): - """Test rendering with/without DNS server, search domain""" - self.maxDiff = None - # empty DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'DNS': '', - 'ETH0_DNS': '', - 'ETH0_SEARCH_DOMAIN': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # set DNS, ETH0_DNS, ETH0_SEARCH_DOMAIN - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'nameservers': { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com', 'example.org']}, - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_gen_conf_mtu(self, m_get_phys_by_mac): - """Test rendering with/without MTU""" - self.maxDiff = None - # empty ETH0_MTU - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MTU': '', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - # set ETH0_MTU - context = { - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MTU': '1280', } - for nic in self.system_nics: - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'mtu': '1280', - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork(context, mock.Mock()) - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_eth0(self, m_get_phys_by_mac): - for nic in self.system_nics: - m_get_phys_by_mac.return_value = {MACADDR: nic} - net = ds.OpenNebulaNetwork({}, mock.Mock()) - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} - - self.assertEqual(net.gen_conf(), expected) - - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") - def test_distro_passed_through(self, m_get_physical_nics_by_mac): - ds.OpenNebulaNetwork({}, mock.sentinel.distro) - self.assertEqual( - [mock.call(mock.sentinel.distro)], - 
m_get_physical_nics_by_mac.call_args_list, - ) - - def test_eth0_override(self): - self.maxDiff = None - context = { - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_GATEWAY': '1.2.3.5', - 'ETH0_GATEWAY6': '', - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_IP6': '', - 'ETH0_IP6_PREFIX_LENGTH': '', - 'ETH0_IP6_ULA': '', - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MASK': '255.255.0.0', - 'ETH0_MTU': '', - 'ETH0_NETWORK': '10.18.0.0', - 'ETH0_SEARCH_DOMAIN': '', - } - for nic in self.system_nics: - net = ds.OpenNebulaNetwork(context, mock.Mock(), - system_nics_by_mac={MACADDR: nic}) - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [IP_BY_MACADDR + '/16'], - 'gateway4': '1.2.3.5', - 'nameservers': { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}} - - self.assertEqual(expected, net.gen_conf()) - - def test_eth0_v4v6_override(self): - self.maxDiff = None - context = { - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_GATEWAY': '1.2.3.5', - 'ETH0_GATEWAY6': IP6_GW, - 'ETH0_IP': IP_BY_MACADDR, - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, - 'ETH0_IP6_ULA': IP6_ULA, - 'ETH0_MAC': '02:00:0a:12:01:01', - 'ETH0_MASK': '255.255.0.0', - 'ETH0_MTU': '1280', - 'ETH0_NETWORK': '10.18.0.0', - 'ETH0_SEARCH_DOMAIN': 'example.com example.org', - } - for nic in self.system_nics: - net = ds.OpenNebulaNetwork(context, mock.Mock(), - system_nics_by_mac={MACADDR: nic}) - - expected = { - 'version': 2, - 'ethernets': { - nic: { - 'match': {'macaddress': MACADDR}, - 'addresses': [ - IP_BY_MACADDR + '/16', - IP6_GLOBAL + '/' + IP6_PREFIX, - IP6_ULA + '/' + IP6_PREFIX], - 'gateway4': '1.2.3.5', - 'gateway6': IP6_GW, - 'nameservers': { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com', 'example.org']}, - 'mtu': '1280'}}} - - self.assertEqual(expected, net.gen_conf()) - - def test_multiple_nics(self): - """Test rendering multiple nics with names that differ from context.""" - self.maxDiff = None - MAC_1 = "02:00:0a:12:01:01" - MAC_2 = "02:00:0a:12:01:02" - context = { - 'DNS': '1.2.3.8', - 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_GATEWAY': '1.2.3.5', - 'ETH0_GATEWAY6': IP6_GW, - 'ETH0_IP': '10.18.1.1', - 'ETH0_IP6': IP6_GLOBAL, - 'ETH0_IP6_PREFIX_LENGTH': '', - 'ETH0_IP6_ULA': IP6_ULA, - 'ETH0_MAC': MAC_2, - 'ETH0_MASK': '255.255.0.0', - 'ETH0_MTU': '1280', - 'ETH0_NETWORK': '10.18.0.0', - 'ETH0_SEARCH_DOMAIN': 'example.com', - 'ETH3_DNS': '10.3.1.2', - 'ETH3_GATEWAY': '10.3.0.1', - 'ETH3_GATEWAY6': '', - 'ETH3_IP': '10.3.1.3', - 'ETH3_IP6': '', - 'ETH3_IP6_PREFIX_LENGTH': '', - 'ETH3_IP6_ULA': '', - 'ETH3_MAC': MAC_1, - 'ETH3_MASK': '255.255.0.0', - 'ETH3_MTU': '', - 'ETH3_NETWORK': '10.3.0.0', - 'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org', - } - net = ds.OpenNebulaNetwork( - context, - mock.Mock(), - system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'} - ) - - expected = { - 'version': 2, - 'ethernets': { - 'enp1s2': { - 'match': {'macaddress': MAC_2}, - 'addresses': [ - '10.18.1.1/16', - IP6_GLOBAL + '/64', - IP6_ULA + '/64'], - 'gateway4': '1.2.3.5', - 'gateway6': IP6_GW, - 'nameservers': { - 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], - 'search': ['example.com']}, - 'mtu': '1280'}, - 'enp0s25': { - 'match': {'macaddress': MAC_1}, - 'addresses': ['10.3.1.3/16'], - 'gateway4': '10.3.0.1', - 'nameservers': { - 'addresses': ['10.3.1.2', '1.2.3.8'], - 'search': [ - 'third.example.com', - 'third.example.org']}}}} - - self.assertEqual(expected, net.gen_conf()) - - -class 
TestParseShellConfig: - @pytest.mark.allow_subp_for("bash") - def test_no_seconds(self): - cfg = '\n'.join(["foo=bar", "SECONDS=2", "xx=foo"]) - # we could test 'sleep 2', but that would make the test run slower. - ret = ds.parse_shell_config(cfg) - assert ret == {"foo": "bar", "xx": "foo"} - - -class TestGetPhysicalNicsByMac: - @pytest.mark.parametrize( - "interfaces_by_mac,physical_devs,expected_return", - [ - # No interfaces => empty return - ({}, [], {}), - # Only virtual interface => empty return - ({"mac1": "virtual0"}, [], {}), - # Only physical interface => it is returned - ({"mac2": "physical0"}, ["physical0"], {"mac2": "physical0"}), - # Combination of physical and virtual => only physical returned - ( - {"mac3": "physical1", "mac4": "virtual1"}, - ["physical1"], - {"mac3": "physical1"}, - ), - ], - ) - def test(self, interfaces_by_mac, physical_devs, expected_return): - distro = mock.Mock() - distro.networking.is_physical.side_effect = ( - lambda devname: devname in physical_devs - ) - with mock.patch( - DS_PATH + ".net.get_interfaces_by_mac", - return_value=interfaces_by_mac, - ): - assert expected_return == ds.get_physical_nics_by_mac(distro) - - -def populate_context_dir(path, variables): - data = "# Context variables generated by OpenNebula\n" - for k, v in variables.items(): - data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))) - populate_dir(path, {'context.sh': data}) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py deleted file mode 100644 index a9829c75..00000000 --- a/tests/unittests/test_datasource/test_openstack.py +++ /dev/null @@ -1,724 +0,0 @@ -# Copyright (C) 2014 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This file is part of cloud-init. See LICENSE file for license information. 
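The OpenNebula helpers exercised above form a small round trip: populate_context_dir writes a shell-quoted context.sh, and parse_shell_config evaluates such content with bash and hands the variables back as a dict, dropping bash-managed keys such as SECONDS. A minimal sketch of that behavior, mirroring the TestParseShellConfig case above (illustrative only, not part of the patch; it assumes bash is available, as the allow_subp_for("bash") marker requires):

    from cloudinit.sources import DataSourceOpenNebula as ds

    # parse_shell_config runs the snippet under bash and collects the
    # variables it sets; bash-internal keys such as SECONDS are
    # filtered out of the returned dict.
    cfg = "\n".join(["foo=bar", "SECONDS=2", "xx=foo"])
    assert ds.parse_shell_config(cfg) == {"foo": "bar", "xx": "foo"}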
- -import copy -import httpretty as hp -import json -import re -from io import StringIO -from urllib.parse import urlparse - -from cloudinit.tests import helpers as test_helpers - -from cloudinit import helpers -from cloudinit import settings -from cloudinit.sources import BrokenMetadata, convert_vendordata, UNSET -from cloudinit.sources import DataSourceOpenStack as ds -from cloudinit.sources.helpers import openstack -from cloudinit import util - -BASE_URL = "http://169.254.169.254" -PUBKEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' -EC2_META = { - 'ami-id': 'ami-00000001', - 'ami-launch-index': '0', - 'ami-manifest-path': 'FIXME', - 'hostname': 'sm-foo-test.novalocal', - 'instance-action': 'none', - 'instance-id': 'i-00000001', - 'instance-type': 'm1.tiny', - 'local-hostname': 'sm-foo-test.novalocal', - 'local-ipv4': '0.0.0.0', - 'public-hostname': 'sm-foo-test.novalocal', - 'public-ipv4': '0.0.0.1', - 'reservation-id': 'r-iru5qm4m', -} -USER_DATA = b'#!/bin/sh\necho This is user data\n' -VENDOR_DATA = { - 'magic': '', -} -VENDOR_DATA2 = { - 'static': {} -} -OSTACK_META = { - 'availability_zone': 'nova', - 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, - {'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}], - 'hostname': 'sm-foo-test.novalocal', - 'meta': {'dsmode': 'local', 'my-meta': 'my-value'}, - 'name': 'sm-foo-test', - 'public_keys': {'mykey': PUBKEY}, - 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c', -} -CONTENT_0 = b'This is contents of /etc/foo.cfg\n' -CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' -OS_FILES = { - 'openstack/content/0000': CONTENT_0, - 'openstack/content/0001': CONTENT_1, - 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), - 'openstack/latest/network_data.json': json.dumps( - {'links': [], 'networks': [], 'services': []}), - 'openstack/latest/user_data': USER_DATA, - 'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA), - 'openstack/latest/vendor_data2.json': json.dumps(VENDOR_DATA2), -} -EC2_FILES = { - 'latest/user-data': USER_DATA, -} -EC2_VERSIONS = [ - 'latest', -] - -MOCK_PATH = 'cloudinit.sources.DataSourceOpenStack.' - - -# TODO _register_uris should leverage test_ec2.register_mock_metaserver. 
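The _register_uris helper below funnels every request for the link-local metadata address through one httpretty callback and dispatches on the URL path. Stripped to its skeleton, the pattern looks like this (a sketch only; the callback name and body are illustrative, but the registration call and the three-argument callback signature mirror the helper below):

    import re
    import httpretty as hp

    def request_callback(method, uri, headers):
        # Inspect the uri path here and return (status, headers, body).
        return (200, headers, "latest")

    hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
                    body=request_callback)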
-def _register_uris(version, ec2_files, ec2_meta, os_files): - """Registers a set of url patterns into httpretty that will mimic the - same data returned by the openstack metadata service (and ec2 service).""" - - def match_ec2_url(uri, headers): - path = uri.path.strip("/") - if len(path) == 0: - return (200, headers, "\n".join(EC2_VERSIONS)) - path = uri.path.lstrip("/") - if path in ec2_files: - return (200, headers, ec2_files.get(path)) - if path == 'latest/meta-data/': - buf = StringIO() - for (k, v) in ec2_meta.items(): - if isinstance(v, (list, tuple)): - buf.write("%s/" % (k)) - else: - buf.write("%s" % (k)) - buf.write("\n") - return (200, headers, buf.getvalue()) - if path.startswith('latest/meta-data/'): - value = None - pieces = path.split("/") - if path.endswith("/"): - pieces = pieces[2:-1] - value = util.get_cfg_by_path(ec2_meta, pieces) - else: - pieces = pieces[2:] - value = util.get_cfg_by_path(ec2_meta, pieces) - if value is not None: - return (200, headers, str(value)) - return (404, headers, '') - - def match_os_uri(uri, headers): - path = uri.path.strip("/") - if path == 'openstack': - return (200, headers, "\n".join([openstack.OS_LATEST])) - path = uri.path.lstrip("/") - if path in os_files: - return (200, headers, os_files.get(path)) - return (404, headers, '') - - def get_request_callback(method, uri, headers): - uri = urlparse(uri) - path = uri.path.lstrip("/").split("/") - if path[0] == 'openstack': - return match_os_uri(uri, headers) - return match_ec2_url(uri, headers) - - hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'), - body=get_request_callback) - - -def _read_metadata_service(): - return ds.read_metadata_service(BASE_URL, retries=0, timeout=0.1) - - -class TestOpenStackDataSource(test_helpers.HttprettyTestCase): - - with_logs = True - VERSION = 'latest' - - def setUp(self): - super(TestOpenStackDataSource, self).setUp() - self.tmp = self.tmp_dir() - - def test_successful(self): - _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) - f = _read_metadata_service() - self.assertEqual(VENDOR_DATA, f.get('vendordata')) - self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertEqual(2, len(f['files'])) - self.assertEqual(USER_DATA, f.get('userdata')) - self.assertEqual(EC2_META, f.get('ec2-metadata')) - self.assertEqual(2, f.get('version')) - metadata = f['metadata'] - self.assertEqual('nova', metadata.get('availability_zone')) - self.assertEqual('sm-foo-test.novalocal', metadata.get('hostname')) - self.assertEqual('sm-foo-test.novalocal', - metadata.get('local-hostname')) - self.assertEqual('sm-foo-test', metadata.get('name')) - self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', - metadata.get('uuid')) - self.assertEqual('b0fa911b-69d4-4476-bbe2-1c92bff6535c', - metadata.get('instance-id')) - - def test_no_ec2(self): - _register_uris(self.VERSION, {}, {}, OS_FILES) - f = _read_metadata_service() - self.assertEqual(VENDOR_DATA, f.get('vendordata')) - self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertEqual(USER_DATA, f.get('userdata')) - self.assertEqual({}, f.get('ec2-metadata')) - self.assertEqual(2, f.get('version')) - - def test_bad_metadata(self): - os_files = copy.deepcopy(OS_FILES) - for k in list(os_files.keys()): - if k.endswith('meta_data.json'): - 
os_files.pop(k, None) - _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(openstack.NonReadable, _read_metadata_service) - - def test_bad_uuid(self): - os_files = copy.deepcopy(OS_FILES) - os_meta = copy.deepcopy(OSTACK_META) - os_meta.pop('uuid') - for k in list(os_files.keys()): - if k.endswith('meta_data.json'): - os_files[k] = json.dumps(os_meta) - _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(BrokenMetadata, _read_metadata_service) - - def test_userdata_empty(self): - os_files = copy.deepcopy(OS_FILES) - for k in list(os_files.keys()): - if k.endswith('user_data'): - os_files.pop(k, None) - _register_uris(self.VERSION, {}, {}, os_files) - f = _read_metadata_service() - self.assertEqual(VENDOR_DATA, f.get('vendordata')) - self.assertEqual(VENDOR_DATA2, f.get('vendordata2')) - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertFalse(f.get('userdata')) - - def test_vendordata_empty(self): - os_files = copy.deepcopy(OS_FILES) - for k in list(os_files.keys()): - if k.endswith('vendor_data.json'): - os_files.pop(k, None) - _register_uris(self.VERSION, {}, {}, os_files) - f = _read_metadata_service() - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertFalse(f.get('vendordata')) - - def test_vendordata2_empty(self): - os_files = copy.deepcopy(OS_FILES) - for k in list(os_files.keys()): - if k.endswith('vendor_data2.json'): - os_files.pop(k, None) - _register_uris(self.VERSION, {}, {}, os_files) - f = _read_metadata_service() - self.assertEqual(CONTENT_0, f['files']['/etc/foo.cfg']) - self.assertEqual(CONTENT_1, f['files']['/etc/bar/bar.cfg']) - self.assertFalse(f.get('vendordata2')) - - def test_vendordata_invalid(self): - os_files = copy.deepcopy(OS_FILES) - for k in list(os_files.keys()): - if k.endswith('vendor_data.json'): - os_files[k] = '{' # some invalid json - _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(BrokenMetadata, _read_metadata_service) - - def test_vendordata2_invalid(self): - os_files = copy.deepcopy(OS_FILES) - for k in list(os_files.keys()): - if k.endswith('vendor_data2.json'): - os_files[k] = '{' # some invalid json - _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(BrokenMetadata, _read_metadata_service) - - def test_metadata_invalid(self): - os_files = copy.deepcopy(OS_FILES) - for k in list(os_files.keys()): - if k.endswith('meta_data.json'): - os_files[k] = '{' # some invalid json - _register_uris(self.VERSION, {}, {}, os_files) - self.assertRaises(BrokenMetadata, _read_metadata_service) - - @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - def test_datasource(self, m_dhcp): - _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - self.assertIsNone(ds_os.version) - mock_path = MOCK_PATH + 'detect_openstack' - with test_helpers.mock.patch(mock_path) as m_detect_os: - m_detect_os.return_value = True - found = ds_os.get_data() - self.assertTrue(found) - self.assertEqual(2, ds_os.version) - md = dict(ds_os.metadata) - md.pop('instance-id', None) - md.pop('local-hostname', None) - self.assertEqual(OSTACK_META, md) - self.assertEqual(EC2_META, ds_os.ec2_metadata) - self.assertEqual(USER_DATA, ds_os.userdata_raw) - self.assertEqual(2, len(ds_os.files)) - self.assertEqual(VENDOR_DATA, ds_os.vendordata_pure) 
- self.assertEqual(VENDOR_DATA2, ds_os.vendordata2_pure) - self.assertIsNone(ds_os.vendordata_raw) - m_dhcp.assert_not_called() - - @hp.activate - @test_helpers.mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - @test_helpers.mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - def test_local_datasource(self, m_dhcp, m_net): - """OpenStackLocal calls EphemeralDHCPNetwork and gets instance data.""" - _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) - ds_os_local = ds.DataSourceOpenStackLocal( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - ds_os_local._fallback_interface = 'eth9' # Monkey patch for dhcp - m_dhcp.return_value = [{ - 'interface': 'eth9', 'fixed-address': '192.168.2.9', - 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', - 'broadcast-address': '192.168.2.255'}] - - self.assertIsNone(ds_os_local.version) - mock_path = MOCK_PATH + 'detect_openstack' - with test_helpers.mock.patch(mock_path) as m_detect_os: - m_detect_os.return_value = True - found = ds_os_local.get_data() - self.assertTrue(found) - self.assertEqual(2, ds_os_local.version) - md = dict(ds_os_local.metadata) - md.pop('instance-id', None) - md.pop('local-hostname', None) - self.assertEqual(OSTACK_META, md) - self.assertEqual(EC2_META, ds_os_local.ec2_metadata) - self.assertEqual(USER_DATA, ds_os_local.userdata_raw) - self.assertEqual(2, len(ds_os_local.files)) - self.assertEqual(VENDOR_DATA, ds_os_local.vendordata_pure) - self.assertEqual(VENDOR_DATA2, ds_os_local.vendordata2_pure) - self.assertIsNone(ds_os_local.vendordata_raw) - m_dhcp.assert_called_with('eth9', None) - - def test_bad_datasource_meta(self): - os_files = copy.deepcopy(OS_FILES) - for k in list(os_files.keys()): - if k.endswith('meta_data.json'): - os_files[k] = '{' # some invalid json - _register_uris(self.VERSION, {}, {}, os_files) - ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, - None, - helpers.Paths({'run_dir': self.tmp})) - self.assertIsNone(ds_os.version) - mock_path = MOCK_PATH + 'detect_openstack' - with test_helpers.mock.patch(mock_path) as m_detect_os: - m_detect_os.return_value = True - found = ds_os.get_data() - self.assertFalse(found) - self.assertIsNone(ds_os.version) - self.assertIn( - 'InvalidMetaDataException: Broken metadata address' - ' http://169.254.169.25', - self.logs.getvalue()) - - def test_no_datasource(self): - os_files = copy.deepcopy(OS_FILES) - for k in list(os_files.keys()): - if k.endswith('meta_data.json'): - os_files.pop(k) - _register_uris(self.VERSION, {}, {}, os_files) - ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, - None, - helpers.Paths({'run_dir': self.tmp})) - ds_os.ds_cfg = { - 'max_wait': 0, - 'timeout': 0, - } - self.assertIsNone(ds_os.version) - mock_path = MOCK_PATH + 'detect_openstack' - with test_helpers.mock.patch(mock_path) as m_detect_os: - m_detect_os.return_value = True - found = ds_os.get_data() - self.assertFalse(found) - self.assertIsNone(ds_os.version) - - def test_network_config_disabled_by_datasource_config(self): - """The network_config can be disabled from datasource config.""" - mock_path = MOCK_PATH + 'openstack.convert_net_json' - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - ds_os.ds_cfg = {'apply_network_config': False} - sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], - 'networks': [], 'services': []} - ds_os.network_json = sample_json # Ignore this content from metadata - with test_helpers.mock.patch(mock_path) as m_convert_json: - 
self.assertIsNone(ds_os.network_config) - m_convert_json.assert_not_called() - - def test_network_config_from_network_json(self): - """The datasource gets network_config from network_data.json.""" - mock_path = MOCK_PATH + 'openstack.convert_net_json' - example_cfg = {'version': 1, 'config': []} - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - sample_json = {'links': [{'ethernet_mac_address': 'mymac'}], - 'networks': [], 'services': []} - ds_os.network_json = sample_json - with test_helpers.mock.patch(mock_path) as m_convert_json: - m_convert_json.return_value = example_cfg - self.assertEqual(example_cfg, ds_os.network_config) - self.assertIn( - 'network config provided via network_json', self.logs.getvalue()) - m_convert_json.assert_called_with(sample_json, known_macs=None) - - def test_network_config_cached(self): - """The datasource caches the network_config property.""" - mock_path = MOCK_PATH + 'openstack.convert_net_json' - example_cfg = {'version': 1, 'config': []} - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - ds_os._network_config = example_cfg - with test_helpers.mock.patch(mock_path) as m_convert_json: - self.assertEqual(example_cfg, ds_os.network_config) - m_convert_json.assert_not_called() - - def test_disabled_datasource(self): - os_files = copy.deepcopy(OS_FILES) - os_meta = copy.deepcopy(OSTACK_META) - os_meta['meta'] = { - 'dsmode': 'disabled', - } - for k in list(os_files.keys()): - if k.endswith('meta_data.json'): - os_files[k] = json.dumps(os_meta) - _register_uris(self.VERSION, {}, {}, os_files) - ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN, - None, - helpers.Paths({'run_dir': self.tmp})) - ds_os.ds_cfg = { - 'max_wait': 0, - 'timeout': 0, - } - self.assertIsNone(ds_os.version) - mock_path = MOCK_PATH + 'detect_openstack' - with test_helpers.mock.patch(mock_path) as m_detect_os: - m_detect_os.return_value = True - found = ds_os.get_data() - self.assertFalse(found) - self.assertIsNone(ds_os.version) - - @hp.activate - def test_wb__crawl_metadata_does_not_persist(self): - """_crawl_metadata returns current metadata and does not cache.""" - _register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES) - ds_os = ds.DataSourceOpenStack( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - crawled_data = ds_os._crawl_metadata() - self.assertEqual(UNSET, ds_os.ec2_metadata) - self.assertIsNone(ds_os.userdata_raw) - self.assertEqual(0, len(ds_os.files)) - self.assertIsNone(ds_os.vendordata_raw) - self.assertEqual( - ['dsmode', 'ec2-metadata', 'files', 'metadata', 'networkdata', - 'userdata', 'vendordata', 'vendordata2', 'version'], - sorted(crawled_data.keys())) - self.assertEqual('local', crawled_data['dsmode']) - self.assertEqual(EC2_META, crawled_data['ec2-metadata']) - self.assertEqual(2, len(crawled_data['files'])) - md = copy.deepcopy(crawled_data['metadata']) - md.pop('instance-id') - md.pop('local-hostname') - self.assertEqual(OSTACK_META, md) - self.assertEqual( - json.loads(OS_FILES['openstack/latest/network_data.json']), - crawled_data['networkdata']) - self.assertEqual(USER_DATA, crawled_data['userdata']) - self.assertEqual(VENDOR_DATA, crawled_data['vendordata']) - self.assertEqual(VENDOR_DATA2, crawled_data['vendordata2']) - self.assertEqual(2, crawled_data['version']) - - -class TestVendorDataLoading(test_helpers.TestCase): - def cvj(self, data): - return convert_vendordata(data) - - def test_vd_load_none(self): - # non-existent
vendor-data should return none - self.assertIsNone(self.cvj(None)) - - def test_vd_load_string(self): - self.assertEqual(self.cvj("foobar"), "foobar") - - def test_vd_load_list(self): - data = [{'foo': 'bar'}, 'mystring', list(['another', 'list'])] - self.assertEqual(self.cvj(data), data) - - def test_vd_load_dict_no_ci(self): - self.assertIsNone(self.cvj({'foo': 'bar'})) - - def test_vd_load_dict_ci_dict(self): - self.assertRaises(ValueError, self.cvj, - {'foo': 'bar', 'cloud-init': {'x': 1}}) - - def test_vd_load_dict_ci_string(self): - data = {'foo': 'bar', 'cloud-init': 'VENDOR_DATA'} - self.assertEqual(self.cvj(data), data['cloud-init']) - - def test_vd_load_dict_ci_list(self): - data = {'foo': 'bar', 'cloud-init': ['VD_1', 'VD_2']} - self.assertEqual(self.cvj(data), data['cloud-init']) - - -@test_helpers.mock.patch(MOCK_PATH + 'util.is_x86') -class TestDetectOpenStack(test_helpers.CiTestCase): - - def test_detect_openstack_non_intel_x86(self, m_is_x86): - """Return True on non-intel platforms because dmi isn't conclusive.""" - m_is_x86.return_value = False - self.assertTrue( - ds.detect_openstack(), 'Expected detect_openstack == True') - - @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_not_detect_openstack_intel_x86_ec2(self, m_dmi, m_proc_env, - m_is_x86): - """Return False on EC2 platforms.""" - m_is_x86.return_value = True - # No product_name in proc/1/environ - m_proc_env.return_value = {'HOME': '/'} - - def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'HVM domU' # Nothing 'openstackish' on EC2 - if dmi_key == 'chassis-asset-tag': - return '' # Empty string on EC2 - assert False, 'Unexpected dmi read of %s' % dmi_key - - m_dmi.side_effect = fake_dmi_read - self.assertFalse( - ds.detect_openstack(), 'Expected detect_openstack == False on EC2') - m_proc_env.assert_called_with(1) - - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_intel_product_name_compute(self, m_dmi, - m_is_x86): - """Return True on OpenStack compute and nova instances.""" - m_is_x86.return_value = True - openstack_product_names = ['OpenStack Nova', 'OpenStack Compute'] - - for product_name in openstack_product_names: - m_dmi.return_value = product_name - self.assertTrue( - ds.detect_openstack(), 'Failed to detect_openstack') - - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_opentelekomcloud_chassis_asset_tag(self, m_dmi, - m_is_x86): - """Return True on OpenStack reporting OpenTelekomCloud asset-tag.""" - m_is_x86.return_value = True - - def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'HVM domU' # Nothing 'openstackish' on OpenTelekomCloud - if dmi_key == 'chassis-asset-tag': - return 'OpenTelekomCloud' - assert False, 'Unexpected dmi read of %s' % dmi_key - - m_dmi.side_effect = fake_dmi_read - self.assertTrue( - ds.detect_openstack(), - 'Expected detect_openstack == True on OpenTelekomCloud') - - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_sapccloud_chassis_asset_tag(self, m_dmi, - m_is_x86): - """Return True on OpenStack reporting SAP CCloud VM asset-tag.""" - m_is_x86.return_value = True - - def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'VMware Virtual Platform' # SAP CCloud uses VMware - if dmi_key == 'chassis-asset-tag': - return 'SAP CCloud VM' - assert False, 'Unexpected dmi read of %s' % dmi_key - - m_dmi.side_effect = 
fake_dmi_read - self.assertTrue( - ds.detect_openstack(), - 'Expected detect_openstack == True on SAP CCloud VM') - - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_oraclecloud_chassis_asset_tag(self, m_dmi, - m_is_x86): - """Return True on OpenStack reporting Oracle cloud asset-tag.""" - m_is_x86.return_value = True - - def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'Standard PC (i440FX + PIIX, 1996)' # No match - if dmi_key == 'chassis-asset-tag': - return 'OracleCloud.com' - assert False, 'Unexpected dmi read of %s' % dmi_key - - m_dmi.side_effect = fake_dmi_read - self.assertTrue( - ds.detect_openstack(accept_oracle=True), - 'Expected detect_openstack == True on OracleCloud.com') - self.assertFalse( - ds.detect_openstack(accept_oracle=False), - 'Expected detect_openstack == False.') - - def _test_detect_openstack_nova_compute_chassis_asset_tag(self, m_dmi, - m_is_x86, - chassis_tag): - """Return True on OpenStack reporting generic asset-tag.""" - m_is_x86.return_value = True - - def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'Generic OpenStack Platform' - if dmi_key == 'chassis-asset-tag': - return chassis_tag - assert False, 'Unexpected dmi read of %s' % dmi_key - - m_dmi.side_effect = fake_dmi_read - self.assertTrue( - ds.detect_openstack(), - 'Expected detect_openstack == True on Generic OpenStack Platform') - - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_nova_chassis_asset_tag(self, m_dmi, - m_is_x86): - self._test_detect_openstack_nova_compute_chassis_asset_tag( - m_dmi, m_is_x86, 'OpenStack Nova') - - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_compute_chassis_asset_tag(self, m_dmi, - m_is_x86): - self._test_detect_openstack_nova_compute_chassis_asset_tag( - m_dmi, m_is_x86, 'OpenStack Compute') - - @test_helpers.mock.patch(MOCK_PATH + 'util.get_proc_env') - @test_helpers.mock.patch(MOCK_PATH + 'dmi.read_dmi_data') - def test_detect_openstack_by_proc_1_environ(self, m_dmi, m_proc_env, - m_is_x86): - """Return True when nova product_name specified in /proc/1/environ.""" - m_is_x86.return_value = True - # Nova product_name in proc/1/environ - m_proc_env.return_value = { - 'HOME': '/', 'product_name': 'OpenStack Nova'} - - def fake_dmi_read(dmi_key): - if dmi_key == 'system-product-name': - return 'HVM domU' # Nothing 'openstackish' - if dmi_key == 'chassis-asset-tag': - return '' # Nothing 'openstackish' - assert False, 'Unexpected dmi read of %s' % dmi_key - - m_dmi.side_effect = fake_dmi_read - self.assertTrue( - ds.detect_openstack(), - 'Expected detect_openstack == True when product_name is in' - ' /proc/1/environ') - m_proc_env.assert_called_with(1) - - -class TestMetadataReader(test_helpers.HttprettyTestCase): - """Test the MetadataReader.""" - burl = 'http://169.254.169.254/' - md_base = { - 'availability_zone': 'myaz1', - 'hostname': 'sm-foo-test.novalocal', - "keys": [{"data": PUBKEY, "name": "brickies", "type": "ssh"}], - 'launch_index': 0, - 'name': 'sm-foo-test', - 'public_keys': {'mykey': PUBKEY}, - 'project_id': '6a103f813b774b9fb15a4fcd36e1c056', - 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} - - def register(self, path, body=None, status=200): - content = body if not isinstance(body, str) else body.encode('utf-8') - hp.register_uri( - hp.GET, self.burl + "openstack" + path, status=status, - body=content) - - def register_versions(self, versions): - self.register("", '\n'.join(versions)) - self.register("/",
'\n'.join(versions)) - - def register_version(self, version, data): - content = '\n'.join(sorted(data.keys())) - self.register(version, content) - self.register(version + "/", content) - for path, content in data.items(): - self.register("/%s/%s" % (version, path), content) - self.register("/%s/%s" % (version, path), content) - if 'user_data' not in data: - self.register("/%s/user_data" % version, "nodata", status=404) - - def test__find_working_version(self): - """Test a working version ignores unsupported.""" - unsup = "2016-11-09" - self.register_versions( - [openstack.OS_FOLSOM, openstack.OS_LIBERTY, unsup, - openstack.OS_LATEST]) - self.assertEqual( - openstack.OS_LIBERTY, - openstack.MetadataReader(self.burl)._find_working_version()) - - def test__find_working_version_uses_latest(self): - """'latest' should be used if no supported versions.""" - unsup1, unsup2 = ("2016-11-09", '2017-06-06') - self.register_versions([unsup1, unsup2, openstack.OS_LATEST]) - self.assertEqual( - openstack.OS_LATEST, - openstack.MetadataReader(self.burl)._find_working_version()) - - def test_read_v2_os_ocata(self): - """Validate return value of read_v2 for os_ocata data.""" - md = copy.deepcopy(self.md_base) - md['devices'] = [] - network_data = {'links': [], 'networks': [], 'services': []} - vendor_data = {} - vendor_data2 = {"static": {}} - - data = { - 'meta_data.json': json.dumps(md), - 'network_data.json': json.dumps(network_data), - 'vendor_data.json': json.dumps(vendor_data), - 'vendor_data2.json': json.dumps(vendor_data2), - } - - self.register_versions([openstack.OS_OCATA, openstack.OS_LATEST]) - self.register_version(openstack.OS_OCATA, data) - - mock_read_ec2 = test_helpers.mock.MagicMock( - return_value={'instance-id': 'unused-ec2'}) - expected_md = copy.deepcopy(md) - expected_md.update( - {'instance-id': md['uuid'], 'local-hostname': md['hostname']}) - expected = { - 'userdata': '', # Annoying, no user-data results in empty string. - 'version': 2, - 'metadata': expected_md, - 'vendordata': vendor_data, - 'vendordata2': vendor_data2, - 'networkdata': network_data, - 'ec2-metadata': mock_read_ec2.return_value, - 'files': {}, - } - reader = openstack.MetadataReader(self.burl) - reader._read_ec2_metadata = mock_read_ec2 - self.assertEqual(expected, reader.read_v2()) - self.assertEqual(1, mock_read_ec2.call_count) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_ovf.py b/tests/unittests/test_datasource/test_ovf.py deleted file mode 100644 index ad7446f8..00000000 --- a/tests/unittests/test_datasource/test_ovf.py +++ /dev/null @@ -1,1046 +0,0 @@ -# Copyright (C) 2016 Canonical Ltd. -# -# Author: Scott Moser -# -# This file is part of cloud-init. See LICENSE file for license information. - -import base64 -import os - -from collections import OrderedDict -from textwrap import dedent - -from cloudinit import subp -from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, mock, wrap_and_call -from cloudinit.helpers import Paths -from cloudinit.sources import DataSourceOVF as dsovf -from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( - CustomScriptNotFound) -from cloudinit.safeyaml import YAMLError - -MPATH = 'cloudinit.sources.DataSourceOVF.' - -NOT_FOUND = None - -OVF_ENV_CONTENT = """ -<?xml version="1.0" encoding="UTF-8"?> -<Environment xmlns="http://schemas.dmtf.org/ovf/environment/1" - xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"> - <oe:PlatformSection> - <Kind>ESX Server</Kind> - <Version>3.0.1</Version> - <Vendor>VMware, Inc.</Vendor> - <Locale>en_US</Locale> - </oe:PlatformSection> - <PropertySection> -{properties} - </PropertySection> -</Environment> -""" - - -def fill_properties(props, template=OVF_ENV_CONTENT): - lines = [] - prop_tmpl = '<Property oe:key="{key}" oe:value="{val}"/>' - for key, val in props.items(): - lines.append(prop_tmpl.format(key=key, val=val)) - indent = " " - properties = ''.join([indent + line + "\n" for line in lines]) - return template.format(properties=properties) - - -class TestReadOvfEnv(CiTestCase): - def test_with_b64_userdata(self): - user_data = "#!/bin/sh\necho hello world\n" - user_data_b64 = base64.b64encode(user_data.encode()).decode() - props = {"user-data": user_data_b64, "password": "passw0rd", - "instance-id": "inst-001"} - env = fill_properties(props) - md, ud, cfg = dsovf.read_ovf_environment(env) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual(user_data.encode(), ud) - self.assertEqual({'password': "passw0rd"}, cfg) - - def test_with_non_b64_userdata(self): - user_data = "my-user-data" - props = {"user-data": user_data, "instance-id": "inst-001"} - env = fill_properties(props) - md, ud, cfg = dsovf.read_ovf_environment(env) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual(user_data.encode(), ud) - self.assertEqual({}, cfg) - - def test_with_no_userdata(self): - props = {"password": "passw0rd", "instance-id": "inst-001"} - env = fill_properties(props) - md, ud, cfg = dsovf.read_ovf_environment(env) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({'password': "passw0rd"}, cfg) - self.assertIsNone(ud) - - def test_with_b64_network_config_enable_read_network(self): - network_config = dedent("""\ - network: - version: 2 - ethernets: - nics: - nameservers: - addresses: - - 127.0.0.53 - search: - - eng.vmware.com - - vmware.com - match: - name: eth* - gateway4: 10.10.10.253 - dhcp4: false - addresses: - - 10.10.10.1/24 - """) - network_config_b64 = base64.b64encode(network_config.encode()).decode() - props = {"network-config": network_config_b64, - "password": "passw0rd", - "instance-id": "inst-001"} - env = fill_properties(props) - md, ud, cfg = dsovf.read_ovf_environment(env, True) - self.assertEqual("inst-001", md["instance-id"]) - self.assertEqual({'password': "passw0rd"}, cfg) - self.assertEqual( - {'version': 2, 'ethernets': - {'nics': - {'nameservers': - {'addresses': ['127.0.0.53'], - 'search': ['eng.vmware.com', 'vmware.com']}, - 'match': {'name': 'eth*'}, - 'gateway4': '10.10.10.253', - 'dhcp4': False, - 'addresses': ['10.10.10.1/24']}}}, - md["network-config"]) - self.assertIsNone(ud) - - def test_with_non_b64_network_config_enable_read_network(self): - network_config = dedent("""\ - network: - version: 2 - ethernets: - nics: - nameservers: - addresses: - - 127.0.0.53 - search: - - eng.vmware.com - - vmware.com - match: - name: eth* - gateway4: 10.10.10.253 - dhcp4: false - addresses: - - 10.10.10.1/24 - """) - props = {"network-config": network_config, - "password": "passw0rd", - "instance-id": "inst-001"} - env = fill_properties(props) - md, ud, cfg = dsovf.read_ovf_environment(env, True) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({'password': "passw0rd"}, cfg) - self.assertIsNone(ud) - - def test_with_b64_network_config_disable_read_network(self): - network_config = dedent("""\ - network: - version: 2 - ethernets: - nics: - nameservers: - addresses: - - 127.0.0.53 - search: - - eng.vmware.com - - vmware.com - match: - name: eth* - gateway4: 10.10.10.253 - dhcp4: false - addresses: - - 10.10.10.1/24 - """) - network_config_b64 = base64.b64encode(network_config.encode()).decode() - props =
{"network-config": network_config_b64, - "password": "passw0rd", - "instance-id": "inst-001"} - env = fill_properties(props) - md, ud, cfg = dsovf.read_ovf_environment(env) - self.assertEqual({"instance-id": "inst-001"}, md) - self.assertEqual({'password': "passw0rd"}, cfg) - self.assertIsNone(ud) - - -class TestMarkerFiles(CiTestCase): - - def setUp(self): - super(TestMarkerFiles, self).setUp() - self.tdir = self.tmp_dir() - - def test_false_when_markerid_none(self): - """Return False when markerid provided is None.""" - self.assertFalse( - dsovf.check_marker_exists(markerid=None, marker_dir=self.tdir)) - - def test_markerid_file_exist(self): - """Return False when markerid file path does not exist, - True otherwise.""" - self.assertFalse( - dsovf.check_marker_exists('123', self.tdir)) - - marker_file = self.tmp_path('.markerfile-123.txt', self.tdir) - util.write_file(marker_file, '') - self.assertTrue( - dsovf.check_marker_exists('123', self.tdir) - ) - - def test_marker_file_setup(self): - """Test creation of marker files.""" - markerfilepath = self.tmp_path('.markerfile-hi.txt', self.tdir) - self.assertFalse(os.path.exists(markerfilepath)) - dsovf.setup_marker_files(markerid='hi', marker_dir=self.tdir) - self.assertTrue(os.path.exists(markerfilepath)) - - -class TestDatasourceOVF(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestDatasourceOVF, self).setUp() - self.datasource = dsovf.DataSourceOVF - self.tdir = self.tmp_dir() - - def test_get_data_false_on_none_dmi_data(self): - """When dmi for system-product-name is None, get_data returns False.""" - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': None, - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') - self.assertIn( - 'DEBUG: No system-product-name found', self.logs.getvalue()) - - def test_get_data_vmware_customization_disabled(self): - """When vmware customization is disabled via sys_cfg and - allow_raw_data is disabled via ds_cfg, log a message. - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': True, - 'datasource': {'OVF': {'allow_raw_data': False}}}, - distro={}, paths=paths) - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [MISC] - MARKER-ID = 12345345 - """) - util.write_file(conf_file, conf_content) - retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND, - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') - self.assertIn( - 'DEBUG: Customization for VMware platform is disabled.', - self.logs.getvalue()) - - def test_get_data_vmware_customization_sys_cfg_disabled(self): - """When vmware customization is disabled via sys_cfg and - no meta data is found, log a message. 
- """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': True, - 'datasource': {'OVF': {'allow_raw_data': True}}}, - distro={}, paths=paths) - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [MISC] - MARKER-ID = 12345345 - """) - util.write_file(conf_file, conf_content) - retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND, - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') - self.assertIn( - 'DEBUG: Customization using VMware config is disabled.', - self.logs.getvalue()) - - def test_get_data_allow_raw_data_disabled(self): - """When allow_raw_data is disabled via ds_cfg and - meta data is found, log a message. - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': False, - 'datasource': {'OVF': {'allow_raw_data': False}}}, - distro={}, paths=paths) - - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CLOUDINIT] - METADATA = test-meta - """) - util.write_file(conf_file, conf_content) - # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - util.write_file(metadata_file, "This is meta data") - retcode = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'transport_iso9660': NOT_FOUND, - 'transport_vmware_guestinfo': NOT_FOUND, - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [self.tdir + '/test-meta', '', '']}, - ds.get_data) - self.assertFalse(retcode, 'Expected False return from ds.get_data') - self.assertIn( - 'DEBUG: Customization using raw data is disabled.', - self.logs.getvalue()) - - def test_get_data_vmware_customization_enabled(self): - """When cloud-init workflow for vmware is enabled via sys_cfg log a - message. - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CUSTOM-SCRIPT] - SCRIPT-NAME = test-script - [MISC] - MARKER-ID = 12345345 - """) - util.write_file(conf_file, conf_content) - with mock.patch(MPATH + 'get_tools_config', return_value='true'): - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) - customscript = self.tmp_path('test-script', self.tdir) - self.assertIn('Script %s not found!!' % customscript, - str(context.exception)) - - def test_get_data_cust_script_disabled(self): - """If custom script is disabled by VMware tools configuration, - raise a RuntimeError. 
- """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CUSTOM-SCRIPT] - SCRIPT-NAME = test-script - [MISC] - MARKER-ID = 12345346 - """) - util.write_file(conf_file, conf_content) - # Prepare the custom sript - customscript = self.tmp_path('test-script', self.tdir) - util.write_file(customscript, "This is the post cust script") - - with mock.patch(MPATH + 'get_tools_config', return_value='invalid'): - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): - with self.assertRaises(RuntimeError) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) - self.assertIn('Custom script is disabled by VM Administrator', - str(context.exception)) - - def test_get_data_cust_script_enabled(self): - """If custom script is enabled by VMware tools configuration, - execute the script. - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CUSTOM-SCRIPT] - SCRIPT-NAME = test-script - [MISC] - MARKER-ID = 12345346 - """) - util.write_file(conf_file, conf_content) - - # Mock custom script is enabled by return true when calling - # get_tools_config - with mock.patch(MPATH + 'get_tools_config', return_value="true"): - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) - # Verify custom script is trying to be executed - customscript = self.tmp_path('test-script', self.tdir) - self.assertIn('Script %s not found!!' 
% customscript, - str(context.exception)) - - def test_get_data_force_run_post_script_is_yes(self): - """If DEFAULT-RUN-POST-CUST-SCRIPT is yes, custom script could run if - enable-custom-scripts is not defined in VM Tools configuration - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - # set DEFAULT-RUN-POST-CUST-SCRIPT = yes so that enable-custom-scripts - # default value is TRUE - conf_content = dedent("""\ - [CUSTOM-SCRIPT] - SCRIPT-NAME = test-script - [MISC] - MARKER-ID = 12345346 - DEFAULT-RUN-POST-CUST-SCRIPT = yes - """) - util.write_file(conf_file, conf_content) - - # Mock get_tools_config(section, key, defaultVal) to return - # defaultVal - def my_get_tools_config(*args, **kwargs): - return args[2] - - with mock.patch(MPATH + 'get_tools_config', - side_effect=my_get_tools_config): - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): - with self.assertRaises(CustomScriptNotFound) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) - # Verify custom script still runs although it is - # disabled by VMware Tools - customscript = self.tmp_path('test-script', self.tdir) - self.assertIn('Script %s not found!!' % customscript, - str(context.exception)) - - def test_get_data_non_vmware_seed_platform_info(self): - """Platform info properly reports when on non-vmware platforms.""" - paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) - # Write ovf-env.xml seed file - seed_dir = self.tmp_path('seed', dir=self.tdir) - ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) - util.write_file(ovf_env, OVF_ENV_CONTENT) - ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - - self.assertEqual('ovf', ds.cloud_name) - self.assertEqual('ovf', ds.platform_type) - with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='!VMware'): - with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: - with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: - m_iso9660.return_value = NOT_FOUND - m_guestd.return_value = NOT_FOUND - self.assertTrue(ds.get_data()) - self.assertEqual( - 'ovf (%s/seed/ovf-env.xml)' % self.tdir, - ds.subplatform) - - def test_get_data_vmware_seed_platform_info(self): - """Platform info properly reports when on VMware platform.""" - paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) - # Write ovf-env.xml seed file - seed_dir = self.tmp_path('seed', dir=self.tdir) - ovf_env = self.tmp_path('ovf-env.xml', dir=seed_dir) - util.write_file(ovf_env, OVF_ENV_CONTENT) - ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - - self.assertEqual('ovf', ds.cloud_name) - self.assertEqual('ovf', ds.platform_type) - with mock.patch(MPATH + 'dmi.read_dmi_data', return_value='VMWare'): - with mock.patch(MPATH + 'transport_vmware_guestinfo') as m_guestd: - with mock.patch(MPATH + 'transport_iso9660') as m_iso9660: - m_iso9660.return_value = NOT_FOUND - m_guestd.return_value = NOT_FOUND - self.assertTrue(ds.get_data()) - self.assertEqual( - 'vmware (%s/seed/ovf-env.xml)' % self.tdir, - ds.subplatform) - - @mock.patch('cloudinit.subp.subp') - @mock.patch('cloudinit.sources.DataSource.persist_instance_data') - def test_get_data_vmware_guestinfo_with_network_config( - self, 
m_persist, m_subp - ): - self._test_get_data_with_network_config(guestinfo=False, iso=True) - - @mock.patch('cloudinit.subp.subp') - @mock.patch('cloudinit.sources.DataSource.persist_instance_data') - def test_get_data_iso9660_with_network_config(self, m_persist, m_subp): - self._test_get_data_with_network_config(guestinfo=True, iso=False) - - def _test_get_data_with_network_config(self, guestinfo, iso): - network_config = dedent("""\ - network: - version: 2 - ethernets: - nics: - nameservers: - addresses: - - 127.0.0.53 - search: - - vmware.com - match: - name: eth* - gateway4: 10.10.10.253 - dhcp4: false - addresses: - - 10.10.10.1/24 - """) - network_config_b64 = base64.b64encode(network_config.encode()).decode() - props = {"network-config": network_config_b64, - "password": "passw0rd", - "instance-id": "inst-001"} - env = fill_properties(props) - paths = Paths({'cloud_dir': self.tdir, 'run_dir': self.tdir}) - ds = self.datasource(sys_cfg={}, distro={}, paths=paths) - with mock.patch(MPATH + 'transport_vmware_guestinfo', - return_value=env if guestinfo else NOT_FOUND): - with mock.patch(MPATH + 'transport_iso9660', - return_value=env if iso else NOT_FOUND): - self.assertTrue(ds.get_data()) - self.assertEqual('inst-001', ds.metadata['instance-id']) - self.assertEqual( - {'version': 2, 'ethernets': - {'nics': - {'nameservers': - {'addresses': ['127.0.0.53'], - 'search': ['vmware.com']}, - 'match': {'name': 'eth*'}, - 'gateway4': '10.10.10.253', - 'dhcp4': False, - 'addresses': ['10.10.10.1/24']}}}, - ds.network_config) - - def test_get_data_cloudinit_metadata_json(self): - """Test metadata can be loaded to cloud-init metadata and network. - The metadata format is json. - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CLOUDINIT] - METADATA = test-meta - """) - util.write_file(conf_file, conf_content) - # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - metadata_content = dedent("""\ - { - "instance-id": "cloud-vm", - "local-hostname": "my-host.domain.com", - "network": { - "version": 2, - "ethernets": { - "eths": { - "match": { - "name": "ens*" - }, - "dhcp4": true - } - } - } - } - """) - util.write_file(metadata_file, metadata_content) - - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): - result = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], - 'get_nics_to_enable': ''}, - ds._get_data) - - self.assertTrue(result) - self.assertEqual("cloud-vm", ds.metadata['instance-id']) - self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) - self.assertEqual(2, ds.network_config['version']) - self.assertTrue(ds.network_config['ethernets']['eths']['dhcp4']) - - def test_get_data_cloudinit_metadata_yaml(self): - """Test metadata can be loaded to cloud-init metadata and network. - The metadata format is yaml. 
- """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CLOUDINIT] - METADATA = test-meta - """) - util.write_file(conf_file, conf_content) - # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - metadata_content = dedent("""\ - instance-id: cloud-vm - local-hostname: my-host.domain.com - network: - version: 2 - ethernets: - nics: - match: - name: ens* - dhcp4: yes - """) - util.write_file(metadata_file, metadata_content) - - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): - result = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [self.tdir + '/test-meta', '', ''], - 'get_nics_to_enable': ''}, - ds._get_data) - - self.assertTrue(result) - self.assertEqual("cloud-vm", ds.metadata['instance-id']) - self.assertEqual("my-host.domain.com", ds.metadata['local-hostname']) - self.assertEqual(2, ds.network_config['version']) - self.assertTrue(ds.network_config['ethernets']['nics']['dhcp4']) - - def test_get_data_cloudinit_metadata_not_valid(self): - """Test metadata is not JSON or YAML format. - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) - - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CLOUDINIT] - METADATA = test-meta - """) - util.write_file(conf_file, conf_content) - - # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - metadata_content = "[This is not json or yaml format]a=b" - util.write_file(metadata_file, metadata_content) - - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): - with self.assertRaises(YAMLError) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [ - self.tdir + '/test-meta', '', '' - ], - 'get_nics_to_enable': ''}, - ds.get_data) - - self.assertIn("expected '', but found ''", - str(context.exception)) - - def test_get_data_cloudinit_metadata_not_found(self): - """Test metadata file can't be found. - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CLOUDINIT] - METADATA = test-meta - """) - util.write_file(conf_file, conf_content) - # Don't prepare the meta data file - - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): - with self.assertRaises(FileNotFoundError) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) - - self.assertIn('is not found', str(context.exception)) - - def test_get_data_cloudinit_userdata(self): - """Test user data can be loaded to cloud-init user data. 
- """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': False}, distro={}, - paths=paths) - - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CLOUDINIT] - METADATA = test-meta - USERDATA = test-user - """) - util.write_file(conf_file, conf_content) - - # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - metadata_content = dedent("""\ - instance-id: cloud-vm - local-hostname: my-host.domain.com - network: - version: 2 - ethernets: - nics: - match: - name: ens* - dhcp4: yes - """) - util.write_file(metadata_file, metadata_content) - - # Prepare the user data file - userdata_file = self.tmp_path('test-user', self.tdir) - userdata_content = "This is the user data" - util.write_file(userdata_file, userdata_content) - - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): - result = wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'collect_imc_file_paths': [self.tdir + '/test-meta', - self.tdir + '/test-user', ''], - 'get_nics_to_enable': ''}, - ds._get_data) - - self.assertTrue(result) - self.assertEqual("cloud-vm", ds.metadata['instance-id']) - self.assertEqual(userdata_content, ds.userdata_raw) - - def test_get_data_cloudinit_userdata_not_found(self): - """Test userdata file can't be found. - """ - paths = Paths({'cloud_dir': self.tdir}) - ds = self.datasource( - sys_cfg={'disable_vmware_customization': True}, distro={}, - paths=paths) - - # Prepare the conf file - conf_file = self.tmp_path('test-cust', self.tdir) - conf_content = dedent("""\ - [CLOUDINIT] - METADATA = test-meta - USERDATA = test-user - """) - util.write_file(conf_file, conf_content) - - # Prepare the meta data file - metadata_file = self.tmp_path('test-meta', self.tdir) - metadata_content = dedent("""\ - instance-id: cloud-vm - local-hostname: my-host.domain.com - network: - version: 2 - ethernets: - nics: - match: - name: ens* - dhcp4: yes - """) - util.write_file(metadata_file, metadata_content) - - # Don't prepare the user data file - - with mock.patch(MPATH + 'set_customization_status', - return_value=('msg', b'')): - with self.assertRaises(FileNotFoundError) as context: - wrap_and_call( - 'cloudinit.sources.DataSourceOVF', - {'dmi.read_dmi_data': 'vmware', - 'util.del_dir': True, - 'search_file': self.tdir, - 'wait_for_imc_cfg_file': conf_file, - 'get_nics_to_enable': ''}, - ds.get_data) - - self.assertIn('is not found', str(context.exception)) - - -class TestTransportIso9660(CiTestCase): - - def setUp(self): - super(TestTransportIso9660, self).setUp() - self.add_patch('cloudinit.util.find_devs_with', - 'm_find_devs_with') - self.add_patch('cloudinit.util.mounts', 'm_mounts') - self.add_patch('cloudinit.util.mount_cb', 'm_mount_cb') - self.add_patch('cloudinit.sources.DataSourceOVF.get_ovf_env', - 'm_get_ovf_env') - self.m_get_ovf_env.return_value = ('myfile', 'mycontent') - - def test_find_already_mounted(self): - """Check we call get_ovf_env from on matching mounted devices""" - mounts = { - '/dev/sr9': { - 'fstype': 'iso9660', - 'mountpoint': 'wark/media/sr9', - 'opts': 'ro', - } - } - self.m_mounts.return_value = mounts - - self.assertEqual("mycontent", dsovf.transport_iso9660()) - - def test_find_already_mounted_skips_non_iso9660(self): - """Check we call get_ovf_env ignoring non iso9660""" - mounts = { - 
'/dev/xvdb': { - 'fstype': 'vfat', - 'mountpoint': 'wark/foobar', - 'opts': 'defaults,noatime', - }, - '/dev/xvdc': { - 'fstype': 'iso9660', - 'mountpoint': 'wark/media/sr9', - 'opts': 'ro', - } - } - # We use an OrderedDict here to ensure we check xvdb before xvdc - # as we're not mocking the regex matching, however, if we place - # an entry in the results then we can be reasonably sure that - # we're skipping an entry which fails to match. - self.m_mounts.return_value = ( - OrderedDict(sorted(mounts.items(), key=lambda t: t[0]))) - - self.assertEqual("mycontent", dsovf.transport_iso9660()) - - def test_find_already_mounted_matches_kname(self): - """Check we dont regex match on basename of the device""" - mounts = { - '/dev/foo/bar/xvdc': { - 'fstype': 'iso9660', - 'mountpoint': 'wark/media/sr9', - 'opts': 'ro', - } - } - # we're skipping an entry which fails to match. - self.m_mounts.return_value = mounts - - self.assertEqual(NOT_FOUND, dsovf.transport_iso9660()) - - def test_mount_cb_called_on_blkdevs_with_iso9660(self): - """Check we call mount_cb on blockdevs with iso9660 only""" - self.m_mounts.return_value = {} - self.m_find_devs_with.return_value = ['/dev/sr0'] - self.m_mount_cb.return_value = ("myfile", "mycontent") - - self.assertEqual("mycontent", dsovf.transport_iso9660()) - self.m_mount_cb.assert_called_with( - "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660") - - def test_mount_cb_called_on_blkdevs_with_iso9660_check_regex(self): - """Check we call mount_cb on blockdevs with iso9660 and match regex""" - self.m_mounts.return_value = {} - self.m_find_devs_with.return_value = [ - '/dev/abc', '/dev/my-cdrom', '/dev/sr0'] - self.m_mount_cb.return_value = ("myfile", "mycontent") - - self.assertEqual("mycontent", dsovf.transport_iso9660()) - self.m_mount_cb.assert_called_with( - "/dev/sr0", dsovf.get_ovf_env, mtype="iso9660") - - def test_mount_cb_not_called_no_matches(self): - """Check we don't call mount_cb if nothing matches""" - self.m_mounts.return_value = {} - self.m_find_devs_with.return_value = ['/dev/vg/myovf'] - - self.assertEqual(NOT_FOUND, dsovf.transport_iso9660()) - self.assertEqual(0, self.m_mount_cb.call_count) - - def test_mount_cb_called_require_iso_false(self): - """Check we call mount_cb on blockdevs with require_iso=False""" - self.m_mounts.return_value = {} - self.m_find_devs_with.return_value = ['/dev/xvdz'] - self.m_mount_cb.return_value = ("myfile", "mycontent") - - self.assertEqual( - "mycontent", dsovf.transport_iso9660(require_iso=False)) - - self.m_mount_cb.assert_called_with( - "/dev/xvdz", dsovf.get_ovf_env, mtype=None) - - def test_maybe_cdrom_device_none(self): - """Test maybe_cdrom_device returns False for none/empty input""" - self.assertFalse(dsovf.maybe_cdrom_device(None)) - self.assertFalse(dsovf.maybe_cdrom_device('')) - - def test_maybe_cdrom_device_non_string_exception(self): - """Test maybe_cdrom_device raises ValueError on non-string types""" - with self.assertRaises(ValueError): - dsovf.maybe_cdrom_device({'a': 'eleven'}) - - def test_maybe_cdrom_device_false_on_multi_dir_paths(self): - """Test maybe_cdrom_device is false on /dev[/.*]/* paths""" - self.assertFalse(dsovf.maybe_cdrom_device('/dev/foo/sr0')) - self.assertFalse(dsovf.maybe_cdrom_device('foo/sr0')) - self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0')) - self.assertFalse(dsovf.maybe_cdrom_device('../foo/sr0')) - - def test_maybe_cdrom_device_true_on_hd_partitions(self): - """Test maybe_cdrom_device is false on /dev/hd[a-z][0-9]+ paths""" - 
self.assertTrue(dsovf.maybe_cdrom_device('/dev/hda1')) - self.assertTrue(dsovf.maybe_cdrom_device('hdz9')) - - def test_maybe_cdrom_device_true_on_valid_relative_paths(self): - """Test maybe_cdrom_device normalizes paths""" - self.assertTrue(dsovf.maybe_cdrom_device('/dev/wark/../sr9')) - self.assertTrue(dsovf.maybe_cdrom_device('///sr0')) - self.assertTrue(dsovf.maybe_cdrom_device('/sr0')) - self.assertTrue(dsovf.maybe_cdrom_device('//dev//hda')) - - def test_maybe_cdrom_device_true_on_xvd_partitions(self): - """Test maybe_cdrom_device returns true on xvd*""" - self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda')) - self.assertTrue(dsovf.maybe_cdrom_device('/dev/xvda1')) - self.assertTrue(dsovf.maybe_cdrom_device('xvdza1')) - - -@mock.patch(MPATH + "subp.which") -@mock.patch(MPATH + "subp.subp") -class TestTransportVmwareGuestinfo(CiTestCase): - """Test the com.vmware.guestInfo transport implemented in - transport_vmware_guestinfo.""" - - rpctool = 'vmware-rpctool' - with_logs = True - rpctool_path = '/not/important/vmware-rpctool' - - def test_without_vmware_rpctool_returns_notfound(self, m_subp, m_which): - m_which.return_value = None - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(0, m_subp.call_count, - "subp should not be called if no rpctool in path.") - - def test_notfound_on_exit_code_1(self, m_subp, m_which): - """If vmware-rpctool exits 1, then must return not found.""" - m_which.return_value = self.rpctool_path - m_subp.side_effect = subp.ProcessExecutionError( - stdout="", stderr="No value found", exit_code=1, cmd=["unused"]) - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) - self.assertNotIn("WARNING", self.logs.getvalue(), - "exit code of 1 by rpctool should not cause warning.") - - def test_notfound_if_no_content_but_exit_zero(self, m_subp, m_which): - """If vmware-rpctool exited 0 with no stdout is normal not-found. - - This isn't actually a case I've seen. normally on "not found", - rpctool would exit 1 with 'No value found' on stderr. But cover - the case where it exited 0 and just wrote nothing to stdout. 
- """ - m_which.return_value = self.rpctool_path - m_subp.return_value = ('', '') - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) - - def test_notfound_and_warns_on_unexpected_exit_code(self, m_subp, m_which): - """If vmware-rpctool exits non zero or 1, warnings should be logged.""" - m_which.return_value = self.rpctool_path - m_subp.side_effect = subp.ProcessExecutionError( - stdout=None, stderr="No value found", exit_code=2, cmd=["unused"]) - self.assertEqual(NOT_FOUND, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) - self.assertIn("WARNING", self.logs.getvalue(), - "exit code of 2 by rpctool should log WARNING.") - - def test_found_when_guestinfo_present(self, m_subp, m_which): - """When there is a ovf info, transport should return it.""" - m_which.return_value = self.rpctool_path - content = fill_properties({}) - m_subp.return_value = (content, '') - self.assertEqual(content, dsovf.transport_vmware_guestinfo()) - self.assertEqual(1, m_subp.call_count) - -# -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py deleted file mode 100644 index d017510e..00000000 --- a/tests/unittests/test_datasource/test_rbx.py +++ /dev/null @@ -1,238 +0,0 @@ -import json - -from cloudinit import helpers -from cloudinit import distros -from cloudinit.sources import DataSourceRbxCloud as ds -from cloudinit.tests.helpers import mock, CiTestCase, populate_dir -from cloudinit import subp - -DS_PATH = "cloudinit.sources.DataSourceRbxCloud" - -CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ - "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ - "tToyGP41.s1" - -CLOUD_METADATA = { - "vm": { - "memory": 4, - "cpu": 2, - "name": "vm-image-builder", - "_id": "5beab44f680cffd11f0e60fc" - }, - "additionalMetadata": { - "username": "guru", - "sshKeys": ["ssh-rsa ..."], - "password": { - "sha512": CRYPTO_PASS - } - }, - "disk": [ - {"size": 10, "type": "ssd", - "name": "vm-image-builder-os", - "_id": "5beab450680cffd11f0e60fe"}, - {"size": 2, "type": "ssd", - "name": "ubuntu-1804-bionic", - "_id": "5bef002c680cffd11f107590"} - ], - "netadp": [ - { - "ip": [{"address": "62.181.8.174"}], - "network": { - "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, - "routing": [], - "gateway": "62.181.8.1", - "netmask": "255.255.248.0", - "name": "public", - "type": "public", - "_id": "5784e97be2627505227b578c" - }, - "speed": 1000, - "type": "hv", - "macaddress": "00:15:5D:FF:0F:03", - "_id": "5beab450680cffd11f0e6102" - }, - { - "ip": [{"address": "10.209.78.11"}], - "network": { - "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, - "routing": [], - "gateway": "10.209.78.1", - "netmask": "255.255.255.0", - "name": "network-determined-bardeen", - "type": "private", - "_id": "5beaec64680cffd11f0e7c31" - }, - "speed": 1000, - "type": "hv", - "macaddress": "00:15:5D:FF:0F:24", - "_id": "5bec18c6680cffd11f0f0d8b" - } - ], - "dvddrive": [{"iso": {}}] -} - - -class TestRbxDataSource(CiTestCase): - parsed_user = None - allowed_subp = ['bash'] - - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - - def setUp(self): - super(TestRbxDataSource, self).setUp() - self.tmp = self.tmp_dir() - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp} - ) - - # defaults for few tests - self.ds = ds.DataSourceRbxCloud - self.seed_dir = self.paths.seed_dir - self.sys_cfg = {'datasource': {'RbxCloud': 
{'dsmode': 'local'}}}
-
-    def test_seed_read_user_data_callback_empty_file(self):
-        populate_user_metadata(self.seed_dir, '')
-        populate_cloud_metadata(self.seed_dir, {})
-        results = ds.read_user_data_callback(self.seed_dir)
-
-        self.assertIsNone(results)
-
-    def test_seed_read_user_data_callback_valid_disk(self):
-        populate_user_metadata(self.seed_dir, '')
-        populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
-        results = ds.read_user_data_callback(self.seed_dir)
-
-        self.assertNotEqual(results, None)
-        self.assertTrue('userdata' in results)
-        self.assertTrue('metadata' in results)
-        self.assertTrue('cfg' in results)
-
-    def test_seed_read_user_data_callback_userdata(self):
-        userdata = "#!/bin/sh\nexit 1"
-        populate_user_metadata(self.seed_dir, userdata)
-        populate_cloud_metadata(self.seed_dir, CLOUD_METADATA)
-
-        results = ds.read_user_data_callback(self.seed_dir)
-
-        self.assertNotEqual(results, None)
-        self.assertTrue('userdata' in results)
-        self.assertEqual(results['userdata'], userdata)
-
-    def test_generate_network_config(self):
-        expected = {
-            'version': 1,
-            'config': [
-                {
-                    'subnets': [
-                        {'control': 'auto',
-                         'dns_nameservers': ['8.8.8.8', '8.8.4.4'],
-                         'netmask': '255.255.248.0',
-                         'address': '62.181.8.174',
-                         'type': 'static', 'gateway': '62.181.8.1'}
-                    ],
-                    'type': 'physical',
-                    'name': 'eth0',
-                    'mac_address': '00:15:5d:ff:0f:03'
-                },
-                {
-                    'subnets': [
-                        {'control': 'auto',
-                         'dns_nameservers': ['9.9.9.9', '8.8.8.8'],
-                         'netmask': '255.255.255.0',
-                         'address': '10.209.78.11',
-                         'type': 'static',
-                         'gateway': '10.209.78.1'}
-                    ],
-                    'type': 'physical',
-                    'name': 'eth1',
-                    'mac_address': '00:15:5d:ff:0f:24'
-                }
-            ]
-        }
-        self.assertEqual(
-            expected,
-            ds.generate_network_config(CLOUD_METADATA['netadp'])
-        )
-
-    @mock.patch(DS_PATH + '.subp.subp')
-    def test_gratuitous_arp_run_standard_arping(self, m_subp):
-        """Test that gratuitous_arp runs arping with expected parameters."""
-        items = [
-            {
-                'destination': '172.17.0.2',
-                'source': '172.16.6.104'
-            },
-            {
-                'destination': '172.17.0.2',
-                'source': '172.16.6.104',
-            },
-        ]
-        ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
-        self.assertEqual([
-            mock.call([
-                'arping', '-c', '2', '-S',
-                '172.16.6.104', '172.17.0.2'
-            ]),
-            mock.call([
-                'arping', '-c', '2', '-S',
-                '172.16.6.104', '172.17.0.2'
-            ])
-        ], m_subp.call_args_list
-        )
-
-    @mock.patch(DS_PATH + '.subp.subp')
-    def test_handle_rhel_like_arping(self, m_subp):
-        """Test arping invocation on RHEL-like distros."""
-        items = [
-            {
-                'source': '172.16.6.104',
-                'destination': '172.17.0.2',
-            }
-        ]
-        ds.gratuitous_arp(items, self._fetch_distro('fedora'))
-        self.assertEqual([
-            mock.call(
-                ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2']
-            )],
-            m_subp.call_args_list
-        )
-
-    @mock.patch(
-        DS_PATH + '.subp.subp',
-        side_effect=subp.ProcessExecutionError()
-    )
-    def test_continue_on_arping_error(self, m_subp):
-        """Continue when the arping command fails."""
-        items = [
-            {
-                'destination': '172.17.0.2',
-                'source': '172.16.6.104'
-            },
-            {
-                'destination': '172.17.0.2',
-                'source': '172.16.6.104',
-            },
-        ]
-        ds.gratuitous_arp(items, self._fetch_distro('ubuntu'))
-        self.assertEqual([
-            mock.call([
-                'arping', '-c', '2', '-S',
-                '172.16.6.104', '172.17.0.2'
-            ]),
-            mock.call([
-                'arping', '-c', '2', '-S',
-                '172.16.6.104', '172.17.0.2'
-            ])
-        ], m_subp.call_args_list
-        )
-
-
-def populate_cloud_metadata(path, data):
-    populate_dir(path, {'cloud.json': json.dumps(data)})
-
-
-def populate_user_metadata(path, data):
-    populate_dir(path, {'user.data': data})
diff --git 
a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py deleted file mode 100644 index f9e968c5..00000000 --- a/tests/unittests/test_datasource/test_scaleway.py +++ /dev/null @@ -1,473 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import json - -import httpretty -import requests - -from cloudinit import helpers -from cloudinit import settings -from cloudinit import sources -from cloudinit.sources import DataSourceScaleway - -from cloudinit.tests.helpers import mock, HttprettyTestCase, CiTestCase - - -class DataResponses(object): - """ - Possible responses of the API endpoint - 169.254.42.42/user_data/cloud-init and - 169.254.42.42/vendor_data/cloud-init. - """ - - FAKE_USER_DATA = '#!/bin/bash\necho "user-data"' - - @staticmethod - def rate_limited(method, uri, headers): - return 429, headers, '' - - @staticmethod - def api_error(method, uri, headers): - return 500, headers, '' - - @classmethod - def get_ok(cls, method, uri, headers): - return 200, headers, cls.FAKE_USER_DATA - - @staticmethod - def empty(method, uri, headers): - """ - No user data for this server. - """ - return 404, headers, '' - - -class MetadataResponses(object): - """ - Possible responses of the metadata API. - """ - - FAKE_METADATA = { - 'id': '00000000-0000-0000-0000-000000000000', - 'hostname': 'scaleway.host', - 'tags': [ - "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", - ], - 'ssh_public_keys': [{ - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - 'fingerprint': '2048 06:ae:... login (RSA)' - }, { - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'fingerprint': '2048 06:ff:... login2 (RSA)' - }] - } - - @classmethod - def get_ok(cls, method, uri, headers): - return 200, headers, json.dumps(cls.FAKE_METADATA) - - -class TestOnScaleway(CiTestCase): - - def setUp(self): - super(TestOnScaleway, self).setUp() - self.tmp = self.tmp_dir() - - def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): - mock, faked = fake_dmi - mock.return_value = 'Scaleway' if faked else 'Whatever' - - mock, faked = fake_file_exists - mock.return_value = faked - - mock, faked = fake_cmdline - mock.return_value = \ - 'initrd=initrd showopts scaleway nousb' if faked \ - else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic' - - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): - self.install_mocks( - fake_dmi=(m_read_dmi_data, False), - fake_file_exists=(m_file_exists, False), - fake_cmdline=(m_get_cmdline, False) - ) - self.assertFalse(DataSourceScaleway.on_scaleway()) - - # When not on Scaleway, get_data() returns False. - datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp}) - ) - self.assertFalse(datasource.get_data()) - - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): - """ - dmidecode returns "Scaleway". 
- """ - # dmidecode returns "Scaleway" - self.install_mocks( - fake_dmi=(m_read_dmi_data, True), - fake_file_exists=(m_file_exists, False), - fake_cmdline=(m_get_cmdline, False) - ) - self.assertTrue(DataSourceScaleway.on_scaleway()) - - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): - """ - /var/run/scaleway exists. - """ - self.install_mocks( - fake_dmi=(m_read_dmi_data, False), - fake_file_exists=(m_file_exists, True), - fake_cmdline=(m_get_cmdline, False) - ) - self.assertTrue(DataSourceScaleway.on_scaleway()) - - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('os.path.exists') - @mock.patch('cloudinit.dmi.read_dmi_data') - def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists, - m_get_cmdline): - """ - "scaleway" in /proc/cmdline. - """ - self.install_mocks( - fake_dmi=(m_read_dmi_data, False), - fake_file_exists=(m_file_exists, False), - fake_cmdline=(m_get_cmdline, True) - ) - self.assertTrue(DataSourceScaleway.on_scaleway()) - - -def get_source_address_adapter(*args, **kwargs): - """ - Scaleway user/vendor data API requires to be called with a privileged port. - - If the unittests are run as non-root, the user doesn't have the permission - to bind on ports below 1024. - - This function removes the bind on a privileged address, since anyway the - HTTP call is mocked by httpretty. - """ - kwargs.pop('source_address') - return requests.adapters.HTTPAdapter(*args, **kwargs) - - -class TestDataSourceScaleway(HttprettyTestCase): - - def setUp(self): - tmp = self.tmp_dir() - self.datasource = DataSourceScaleway.DataSourceScaleway( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': tmp}) - ) - super(TestDataSourceScaleway, self).setUp() - - self.metadata_url = \ - DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url'] - self.userdata_url = \ - DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url'] - self.vendordata_url = \ - DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] - - self.add_patch('cloudinit.sources.DataSourceScaleway.on_scaleway', - '_m_on_scaleway', return_value=True) - self.add_patch( - 'cloudinit.sources.DataSourceScaleway.net.find_fallback_nic', - '_m_find_fallback_nic', return_value='scalewaynic0') - - @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', - get_source_address_adapter) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('time.sleep', return_value=None) - def test_metadata_ok(self, sleep, m_get_cmdline, dhcpv4): - """ - get_data() returns metadata, user data and vendor data. 
- """ - m_get_cmdline.return_value = 'scaleway' - - # Make user data API return a valid response - httpretty.register_uri(httpretty.GET, self.metadata_url, - body=MetadataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.userdata_url, - body=DataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.vendordata_url, - body=DataResponses.get_ok) - self.datasource.get_data() - - self.assertEqual(self.datasource.get_instance_id(), - MetadataResponses.FAKE_METADATA['id']) - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - ].sort()) - self.assertEqual(self.datasource.get_hostname(), - MetadataResponses.FAKE_METADATA['hostname']) - self.assertEqual(self.datasource.get_userdata_raw(), - DataResponses.FAKE_USER_DATA) - self.assertEqual(self.datasource.get_vendordata_raw(), - DataResponses.FAKE_USER_DATA) - self.assertIsNone(self.datasource.availability_zone) - self.assertIsNone(self.datasource.region) - self.assertEqual(sleep.call_count, 0) - - def test_ssh_keys_empty(self): - """ - get_public_ssh_keys() should return empty list if no ssh key are - available - """ - self.datasource.metadata['tags'] = [] - self.datasource.metadata['ssh_public_keys'] = [] - self.assertEqual(self.datasource.get_public_ssh_keys(), []) - - def test_ssh_keys_only_tags(self): - """ - get_public_ssh_keys() should return list of keys available in tags - """ - self.datasource.metadata['tags'] = [ - "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", - "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABCCCCC", - ] - self.datasource.metadata['ssh_public_keys'] = [] - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - ].sort()) - - def test_ssh_keys_only_conf(self): - """ - get_public_ssh_keys() should return list of keys available in - ssh_public_keys field - """ - self.datasource.metadata['tags'] = [] - self.datasource.metadata['ssh_public_keys'] = [{ - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - 'fingerprint': '2048 06:ae:... login (RSA)' - }, { - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'fingerprint': '2048 06:ff:... login2 (RSA)' - }] - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - ].sort()) - - def test_ssh_keys_both(self): - """ - get_public_ssh_keys() should return a merge of keys available - in ssh_public_keys and tags - """ - self.datasource.metadata['tags'] = [ - "AUTHORIZED_KEY=ssh-rsa_AAAAB3NzaC1yc2EAAAADAQABDDDDD", - ] - - self.datasource.metadata['ssh_public_keys'] = [{ - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - 'fingerprint': '2048 06:ae:... login (RSA)' - }, { - 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'fingerprint': '2048 06:ff:... 
login2 (RSA)' - }] - self.assertEqual(self.datasource.get_public_ssh_keys().sort(), [ - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABDDDDD', - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', - ].sort()) - - @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', - get_source_address_adapter) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('time.sleep', return_value=None) - def test_metadata_404(self, sleep, m_get_cmdline, dhcpv4): - """ - get_data() returns metadata, but no user data nor vendor data. - """ - m_get_cmdline.return_value = 'scaleway' - - # Make user and vendor data APIs return HTTP/404, which means there is - # no user / vendor data for the server. - httpretty.register_uri(httpretty.GET, self.metadata_url, - body=MetadataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.userdata_url, - body=DataResponses.empty) - httpretty.register_uri(httpretty.GET, self.vendordata_url, - body=DataResponses.empty) - self.datasource.get_data() - self.assertIsNone(self.datasource.get_userdata_raw()) - self.assertIsNone(self.datasource.get_vendordata_raw()) - self.assertEqual(sleep.call_count, 0) - - @mock.patch('cloudinit.sources.DataSourceScaleway.EphemeralDHCPv4') - @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', - get_source_address_adapter) - @mock.patch('cloudinit.util.get_cmdline') - @mock.patch('time.sleep', return_value=None) - def test_metadata_rate_limit(self, sleep, m_get_cmdline, dhcpv4): - """ - get_data() is rate limited two times by the metadata API when fetching - user data. - """ - m_get_cmdline.return_value = 'scaleway' - - httpretty.register_uri(httpretty.GET, self.metadata_url, - body=MetadataResponses.get_ok) - httpretty.register_uri(httpretty.GET, self.vendordata_url, - body=DataResponses.empty) - - httpretty.register_uri( - httpretty.GET, self.userdata_url, - responses=[ - httpretty.Response(body=DataResponses.rate_limited), - httpretty.Response(body=DataResponses.rate_limited), - httpretty.Response(body=DataResponses.get_ok), - ] - ) - self.datasource.get_data() - self.assertEqual(self.datasource.get_userdata_raw(), - DataResponses.FAKE_USER_DATA) - self.assertEqual(sleep.call_count, 2) - - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_ok(self, m_get_cmdline, fallback_nic): - """ - network_config will only generate IPv4 config if no ipv6 data is - available in the metadata - """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = None - - netcfg = self.datasource.network_config - resp = { - 'version': 1, - 'config': [ - { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}] - } - ] - } - self.assertEqual(netcfg, resp) - - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_ipv6_ok(self, m_get_cmdline, fallback_nic): - """ - network_config will only generate IPv4/v6 configs if ipv6 data is - available in the metadata - """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = { - 'address': '2000:abc:4444:9876::42:999', - 'gateway': '2000:abc:4444:9876::42:000', - 'netmask': '127', - } - - netcfg = self.datasource.network_config - resp = { - 'version': 1, - 'config': [ - { - 'type': 
'physical', - 'name': 'ens2', - 'subnets': [ - { - 'type': 'dhcp4' - }, - { - 'type': 'static', - 'address': '2000:abc:4444:9876::42:999', - 'gateway': '2000:abc:4444:9876::42:000', - 'netmask': '127', - } - ] - } - ] - } - self.assertEqual(netcfg, resp) - - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_existing(self, m_get_cmdline, fallback_nic): - """ - network_config() should return the same data if a network config - already exists - """ - m_get_cmdline.return_value = 'scaleway' - self.datasource._network_config = '0xdeadbeef' - - netcfg = self.datasource.network_config - self.assertEqual(netcfg, '0xdeadbeef') - - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_unset(self, m_get_cmdline, fallback_nic): - """ - _network_config will be set to sources.UNSET after the first boot. - Make sure it behave correctly. - """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = None - self.datasource._network_config = sources.UNSET - - resp = { - 'version': 1, - 'config': [ - { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}] - } - ] - } - - netcfg = self.datasource.network_config - self.assertEqual(netcfg, resp) - - @mock.patch('cloudinit.sources.DataSourceScaleway.LOG.warning') - @mock.patch('cloudinit.sources.DataSourceScaleway.net.find_fallback_nic') - @mock.patch('cloudinit.util.get_cmdline') - def test_network_config_cached_none(self, m_get_cmdline, fallback_nic, - logwarning): - """ - network_config() should return config data if cached data is None - rather than sources.UNSET - """ - m_get_cmdline.return_value = 'scaleway' - fallback_nic.return_value = 'ens2' - self.datasource.metadata['ipv6'] = None - self.datasource._network_config = None - - resp = { - 'version': 1, - 'config': [ - { - 'type': 'physical', - 'name': 'ens2', - 'subnets': [{'type': 'dhcp4'}] - } - ] - } - - netcfg = self.datasource.network_config - self.assertEqual(netcfg, resp) - logwarning.assert_called_with('Found None as cached _network_config. ' - 'Resetting to %s', sources.UNSET) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py deleted file mode 100644 index 9c499672..00000000 --- a/tests/unittests/test_datasource/test_smartos.py +++ /dev/null @@ -1,1163 +0,0 @@ -# Copyright (C) 2013 Canonical Ltd. -# Copyright 2019 Joyent, Inc. -# -# Author: Ben Howard -# -# This file is part of cloud-init. See LICENSE file for license information. - -'''This is a testcase for the SmartOS datasource. - -It replicates a serial console and acts like the SmartOS console does in -order to validate return responses. 
- -''' - -from binascii import crc32 -import json -import multiprocessing -import os -import os.path -import re -import signal -import stat -import unittest -import uuid - -from cloudinit import serial -from cloudinit.sources import DataSourceSmartOS -from cloudinit.sources.DataSourceSmartOS import ( - convert_smartos_network_data as convert_net, - SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ, - identify_file) -from cloudinit.event import EventScope, EventType - -from cloudinit import helpers as c_helpers -from cloudinit.util import (b64e, write_file) -from cloudinit.subp import (subp, ProcessExecutionError, which) - -from cloudinit.tests.helpers import ( - CiTestCase, mock, FilesystemMockingTestCase, skipIf) - - -try: - import serial as _pyserial - assert _pyserial # avoid pyflakes error F401: import unused - HAS_PYSERIAL = True -except ImportError: - HAS_PYSERIAL = False - -DSMOS = 'cloudinit.sources.DataSourceSmartOS' -SDC_NICS = json.loads(""" -[ - { - "nic_tag": "external", - "primary": true, - "mtu": 1500, - "model": "virtio", - "gateway": "8.12.42.1", - "netmask": "255.255.255.0", - "ip": "8.12.42.102", - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "gateways": [ - "8.12.42.1" - ], - "vlan_id": 324, - "mac": "90:b8:d0:f5:e4:f5", - "interface": "net0", - "ips": [ - "8.12.42.102/24" - ] - }, - { - "nic_tag": "sdc_overlay/16187209", - "gateway": "192.168.128.1", - "model": "virtio", - "mac": "90:b8:d0:a5:ff:cd", - "netmask": "255.255.252.0", - "ip": "192.168.128.93", - "network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9", - "gateways": [ - "192.168.128.1" - ], - "vlan_id": 2, - "mtu": 8500, - "interface": "net1", - "ips": [ - "192.168.128.93/22" - ] - } -] -""") - - -SDC_NICS_ALT = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": [ - "8.12.42.1" - ], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": [ - "8.12.42.51/24" - ], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - "mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": [ - "10.210.1.217/24" - ], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_DHCP = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": [ - "8.12.42.1" - ], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": [ - "8.12.42.51/24" - ], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - "mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": [ - "dhcp" - ], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_MIP = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": [ - "8.12.42.1" - ], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": [ - "8.12.42.51/24", - "8.12.42.52/24" - ], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - 
"mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": [ - "10.210.1.217/24", - "10.210.1.151/24" - ], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_MIP_IPV6 = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": [ - "8.12.42.1" - ], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": [ - "2001:4800:78ff:1b:be76:4eff:fe06:96b3/64", - "8.12.42.51/24" - ], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - "mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": [ - "10.210.1.217/24" - ], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_IPV4_IPV6 = json.loads(""" -[ - { - "interface": "net0", - "mac": "90:b8:d0:ae:64:51", - "vlan_id": 324, - "nic_tag": "external", - "gateway": "8.12.42.1", - "gateways": ["8.12.42.1", "2001::1", "2001::2"], - "netmask": "255.255.255.0", - "ip": "8.12.42.51", - "ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64", - "8.12.42.52/32"], - "network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model": "virtio", - "mtu": 1500, - "primary": true - }, - { - "interface": "net1", - "mac": "90:b8:d0:bd:4f:9c", - "vlan_id": 600, - "nic_tag": "internal", - "netmask": "255.255.255.0", - "ip": "10.210.1.217", - "ips": ["10.210.1.217/24"], - "gateways": ["10.210.1.210"], - "network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model": "virtio", - "mtu": 1500 - } -] -""") - -SDC_NICS_SINGLE_GATEWAY = json.loads(""" -[ - { - "interface":"net0", - "mac":"90:b8:d0:d8:82:b4", - "vlan_id":324, - "nic_tag":"external", - "gateway":"8.12.42.1", - "gateways":["8.12.42.1"], - "netmask":"255.255.255.0", - "ip":"8.12.42.26", - "ips":["8.12.42.26/24"], - "network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe", - "model":"virtio", - "mtu":1500, - "primary":true - }, - { - "interface":"net1", - "mac":"90:b8:d0:0a:51:31", - "vlan_id":600, - "nic_tag":"internal", - "netmask":"255.255.255.0", - "ip":"10.210.1.27", - "ips":["10.210.1.27/24"], - "network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6", - "model":"virtio", - "mtu":1500 - } -] -""") - - -MOCK_RETURNS = { - 'hostname': 'test-host', - 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', - 'disable_iptables_flag': None, - 'enable_motd_sys_info': None, - 'test-var1': 'some data', - 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), - 'sdc:datacenter_name': 'somewhere2', - 'sdc:operator-script': '\n'.join(['bin/true', '']), - 'sdc:uuid': str(uuid.uuid4()), - 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']), - 'user-data': '\n'.join(['something', '']), - 'user-script': '\n'.join(['/bin/true', '']), - 'sdc:nics': json.dumps(SDC_NICS), -} - -DMI_DATA_RETURN = 'smartdc' - -# Useful for calculating the length of a frame body. A SUCCESS body will be -# followed by more characters or be one character less if SUCCESS with no -# payload. See Section 4.3 of https://eng.joyent.com/mdata/protocol.html. 
-SUCCESS_LEN = len('0123abcd SUCCESS ') -NOTFOUND_LEN = len('0123abcd NOTFOUND') - - -class PsuedoJoyentClient(object): - def __init__(self, data=None): - if data is None: - data = MOCK_RETURNS.copy() - self.data = data - self._is_open = False - return - - def get(self, key, default=None, strip=False): - if key in self.data: - r = self.data[key] - if strip: - r = r.strip() - else: - r = default - return r - - def get_json(self, key, default=None): - result = self.get(key, default=default) - if result is None: - return default - return json.loads(result) - - def exists(self): - return True - - def open_transport(self): - assert(not self._is_open) - self._is_open = True - - def close_transport(self): - assert(self._is_open) - self._is_open = False - - -class TestSmartOSDataSource(FilesystemMockingTestCase): - jmc_cfact = None - get_smartos_environ = None - - def setUp(self): - super(TestSmartOSDataSource, self).setUp() - - self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ") - self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact") - self.legacy_user_d = self.tmp_path('legacy_user_tmp') - os.mkdir(self.legacy_user_d) - self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d", - autospec=False, new=self.legacy_user_d) - self.add_patch(DSMOS + ".identify_file", "m_identify_file", - return_value="text/plain") - - def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM, - sys_cfg=None, ds_cfg=None): - self.jmc_cfact.return_value = PsuedoJoyentClient(mockdata) - self.get_smartos_environ.return_value = mode - - tmpd = self.tmp_dir() - dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd), - 'run_dir': self.tmp_path('run_dir')} - for d in dirs.values(): - os.mkdir(d) - paths = c_helpers.Paths(dirs) - - if sys_cfg is None: - sys_cfg = {} - - if ds_cfg is not None: - sys_cfg['datasource'] = sys_cfg.get('datasource', {}) - sys_cfg['datasource']['SmartOS'] = ds_cfg - - return DataSourceSmartOS.DataSourceSmartOS( - sys_cfg, distro=None, paths=paths) - - def test_no_base64(self): - ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} - dsrc = self._get_ds(ds_cfg=ds_cfg) - ret = dsrc.get_data() - self.assertTrue(ret) - - def test_uuid(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['sdc:uuid'], - dsrc.metadata['instance-id']) - - def test_platform_info(self): - """All platform-related attributes are properly set.""" - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - self.assertEqual('joyent', dsrc.cloud_name) - self.assertEqual('joyent', dsrc.platform_type) - self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform) - - def test_root_keys(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['root_authorized_keys'], - dsrc.metadata['public-keys']) - - def test_hostname_b64(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['hostname'], - dsrc.metadata['local-hostname']) - - def test_hostname(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['hostname'], - dsrc.metadata['local-hostname']) - - def test_hostname_if_no_sdc_hostname(self): - my_returns = MOCK_RETURNS.copy() - my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname'] - dsrc = self._get_ds(mockdata=my_returns) - ret = dsrc.get_data() - self.assertTrue(ret) - 
self.assertEqual(my_returns['hostname'],
-                         dsrc.metadata['local-hostname'])
-
-    def test_sdc_hostname_if_no_hostname(self):
-        my_returns = MOCK_RETURNS.copy()
-        my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
-        del my_returns['hostname']
-        dsrc = self._get_ds(mockdata=my_returns)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(my_returns['sdc:hostname'],
-                         dsrc.metadata['local-hostname'])
-
-    def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
-        my_returns = MOCK_RETURNS.copy()
-        del my_returns['hostname']
-        dsrc = self._get_ds(mockdata=my_returns)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(my_returns['sdc:uuid'],
-                         dsrc.metadata['local-hostname'])
-
-    def test_userdata(self):
-        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(MOCK_RETURNS['user-data'],
-                         dsrc.metadata['legacy-user-data'])
-        self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
-                         dsrc.userdata_raw)
-
-    def test_sdc_nics(self):
-        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
-                         dsrc.metadata['network-data'])
-
-    def test_sdc_scripts(self):
-        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(MOCK_RETURNS['user-script'],
-                         dsrc.metadata['user-script'])
-
-        legacy_script_f = "%s/user-script" % self.legacy_user_d
-        print("legacy_script_f=%s" % legacy_script_f)
-        self.assertTrue(os.path.exists(legacy_script_f))
-        self.assertTrue(os.path.islink(legacy_script_f))
-        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
-        self.assertEqual(user_script_perm, '700')
-
-    def test_scripts_shebanged(self):
-        dsrc = self._get_ds(mockdata=MOCK_RETURNS)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(MOCK_RETURNS['user-script'],
-                         dsrc.metadata['user-script'])
-
-        legacy_script_f = "%s/user-script" % self.legacy_user_d
-        self.assertTrue(os.path.exists(legacy_script_f))
-        self.assertTrue(os.path.islink(legacy_script_f))
-        shebang = None
-        with open(legacy_script_f, 'r') as f:
-            shebang = f.readlines()[0].strip()
-        self.assertEqual(shebang, "#!/bin/bash")
-        user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
-        self.assertEqual(user_script_perm, '700')
-
-    def test_scripts_shebang_not_added(self):
-        """
-        Test the SmartOS requirement that plain text scripts are
-        executable. This test makes sure that a plain text script
-        which already carries file magic (a shebang) does not have
-        another one added by cloud-init.
-        """
-
-        my_returns = MOCK_RETURNS.copy()
-        my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
-                                               'print("hi")', ''])
-
-        dsrc = self._get_ds(mockdata=my_returns)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(my_returns['user-script'],
-                         dsrc.metadata['user-script'])
-
-        legacy_script_f = "%s/user-script" % self.legacy_user_d
-        self.assertTrue(os.path.exists(legacy_script_f))
-        self.assertTrue(os.path.islink(legacy_script_f))
-        shebang = None
-        with open(legacy_script_f, 'r') as f:
-            shebang = f.readlines()[0].strip()
-        self.assertEqual(shebang, "#!/usr/bin/perl")
-
-    def test_userdata_removed(self):
-        """
-        User-data in the SmartOS world is supposed to be written to a file
-        each and every boot. This test makes sure that in the event the
-        legacy user-data is removed, the existing user-data is backed up
-        and no /var/db/user-data is left behind.
- """ - - user_data_f = "%s/mdata-user-data" % self.legacy_user_d - with open(user_data_f, 'w') as f: - f.write("PREVIOUS") - - my_returns = MOCK_RETURNS.copy() - del my_returns['user-data'] - - dsrc = self._get_ds(mockdata=my_returns) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertFalse(dsrc.metadata.get('legacy-user-data')) - - found_new = False - for root, _dirs, files in os.walk(self.legacy_user_d): - for name in files: - name_f = os.path.join(root, name) - permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:] - if re.match(r'.*\/mdata-user-data$', name_f): - found_new = True - print(name_f) - self.assertEqual(permissions, '400') - - self.assertFalse(found_new) - - def test_vendor_data_not_default(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['sdc:vendor-data'], - dsrc.metadata['vendor-data']) - - def test_default_vendor_data(self): - my_returns = MOCK_RETURNS.copy() - def_op_script = my_returns['sdc:vendor-data'] - del my_returns['sdc:vendor-data'] - dsrc = self._get_ds(mockdata=my_returns) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data']) - - # we expect default vendor-data is a boothook - self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook")) - - def test_disable_iptables_flag(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['disable_iptables_flag'], - dsrc.metadata['iptables_disable']) - - def test_motd_sys_info(self): - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'], - dsrc.metadata['motd_sys_info']) - - def test_default_ephemeral(self): - # Test to make sure that the builtin config has the ephemeral - # configuration. 
- dsrc = self._get_ds() - cfg = dsrc.get_config_obj() - - ret = dsrc.get_data() - self.assertTrue(ret) - - assert 'disk_setup' in cfg - assert 'fs_setup' in cfg - self.assertIsInstance(cfg['disk_setup'], dict) - self.assertIsInstance(cfg['fs_setup'], list) - - def test_override_disk_aliases(self): - # Test to make sure that the built-in DS is overriden - builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG - - mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}} - - # expect that these values are in builtin, or this is pointless - for k in mydscfg: - self.assertIn(k, builtin) - - dsrc = self._get_ds(ds_cfg=mydscfg) - ret = dsrc.get_data() - self.assertTrue(ret) - - self.assertEqual(mydscfg['disk_aliases']['FOO'], - dsrc.ds_cfg['disk_aliases']['FOO']) - - self.assertEqual(dsrc.device_name_to_device('FOO'), - mydscfg['disk_aliases']['FOO']) - - def test_reconfig_network_on_boot(self): - # Test to ensure that network is configured from metadata on each boot - dsrc = self._get_ds(mockdata=MOCK_RETURNS) - self.assertSetEqual( - {EventType.BOOT_NEW_INSTANCE, - EventType.BOOT, - EventType.BOOT_LEGACY}, - dsrc.default_update_events[EventScope.NETWORK] - ) - - -class TestIdentifyFile(CiTestCase): - """Test the 'identify_file' utility.""" - @skipIf(not which("file"), "command 'file' not available.") - def test_file_happy_path(self): - """Test file is available and functional on plain text.""" - fname = self.tmp_path("myfile") - write_file(fname, "plain text content here\n") - with self.allow_subp(["file"]): - self.assertEqual("text/plain", identify_file(fname)) - - @mock.patch(DSMOS + ".subp.subp") - def test_returns_none_on_error(self, m_subp): - """On 'file' execution error, None should be returned.""" - m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99) - fname = self.tmp_path("myfile") - write_file(fname, "plain text content here\n") - self.assertEqual(None, identify_file(fname)) - self.assertEqual( - [mock.call(["file", "--brief", "--mime-type", fname])], - m_subp.call_args_list) - - -class ShortReader(object): - """Implements a 'read' interface for bytes provided. - much like io.BytesIO but the 'endbyte' acts as if EOF. 
- When it is reached a short will be returned.""" - def __init__(self, initial_bytes, endbyte=b'\0'): - self.data = initial_bytes - self.index = 0 - self.len = len(self.data) - self.endbyte = endbyte - - @property - def emptied(self): - return self.index >= self.len - - def read(self, size=-1): - """Read size bytes but not past a null.""" - if size == 0 or self.index >= self.len: - return b'' - - rsize = size - if size < 0 or size + self.index > self.len: - rsize = self.len - self.index - - next_null = self.data.find(self.endbyte, self.index, rsize) - if next_null >= 0: - rsize = next_null - self.index + 1 - i = self.index - self.index += rsize - ret = self.data[i:i + rsize] - if len(ret) and ret[-1:] == self.endbyte: - ret = ret[:-1] - return ret - - -class TestJoyentMetadataClient(FilesystemMockingTestCase): - - invalid = b'invalid command\n' - failure = b'FAILURE\n' - v2_ok = b'V2_OK\n' - - def setUp(self): - super(TestJoyentMetadataClient, self).setUp() - - self.serial = mock.MagicMock(spec=serial.Serial) - self.request_id = 0xabcdef12 - self.metadata_value = 'value' - self.response_parts = { - 'command': 'SUCCESS', - 'crc': 'b5a9ff00', - 'length': SUCCESS_LEN + len(b64e(self.metadata_value)), - 'payload': b64e(self.metadata_value), - 'request_id': '{0:08x}'.format(self.request_id), - } - - def make_response(): - payloadstr = '' - if 'payload' in self.response_parts: - payloadstr = ' {0}'.format(self.response_parts['payload']) - return ('V2 {length} {crc} {request_id} ' - '{command}{payloadstr}\n'.format( - payloadstr=payloadstr, - **self.response_parts).encode('ascii')) - - self.metasource_data = None - - def read_response(length): - if not self.metasource_data: - self.metasource_data = make_response() - self.metasource_data_len = len(self.metasource_data) - resp = self.metasource_data[:length] - self.metasource_data = self.metasource_data[length:] - return resp - - self.serial.read.side_effect = read_response - self.patched_funcs.enter_context( - mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint', - mock.Mock(return_value=self.request_id))) - - def _get_client(self): - return DataSourceSmartOS.JoyentMetadataClient( - fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM) - - def _get_serial_client(self): - self.serial.timeout = 1 - return DataSourceSmartOS.JoyentMetadataSerialClient(None, - fp=self.serial) - - def assertEndsWith(self, haystack, prefix): - self.assertTrue(haystack.endswith(prefix), - "{0} does not end with '{1}'".format( - repr(haystack), prefix)) - - def assertStartsWith(self, haystack, prefix): - self.assertTrue(haystack.startswith(prefix), - "{0} does not start with '{1}'".format( - repr(haystack), prefix)) - - def assertNoMoreSideEffects(self, obj): - self.assertRaises(StopIteration, obj) - - def test_get_metadata_writes_a_single_line(self): - client = self._get_client() - client.get('some_key') - self.assertEqual(1, self.serial.write.call_count) - written_line = self.serial.write.call_args[0][0] - self.assertEndsWith(written_line.decode('ascii'), - b'\n'.decode('ascii')) - self.assertEqual(1, written_line.count(b'\n')) - - def _get_written_line(self, key='some_key'): - client = self._get_client() - client.get(key) - return self.serial.write.call_args[0][0] - - def test_get_metadata_writes_bytes(self): - self.assertIsInstance(self._get_written_line(), bytes) - - def test_get_metadata_line_starts_with_v2(self): - foo = self._get_written_line() - self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii')) - - def 
test_get_metadata_uses_get_command(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - self.assertEqual('GET', parts[4]) - - def test_get_metadata_base64_encodes_argument(self): - key = 'my_key' - parts = self._get_written_line(key).decode('ascii').strip().split(' ') - self.assertEqual(b64e(key), parts[5]) - - def test_get_metadata_calculates_length_correctly(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - expected_length = len(' '.join(parts[3:])) - self.assertEqual(expected_length, int(parts[1])) - - def test_get_metadata_uses_appropriate_request_id(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - request_id = parts[3] - self.assertEqual(8, len(request_id)) - self.assertEqual(request_id, request_id.lower()) - - def test_get_metadata_uses_random_number_for_request_id(self): - line = self._get_written_line() - request_id = line.decode('ascii').strip().split(' ')[3] - self.assertEqual('{0:08x}'.format(self.request_id), request_id) - - def test_get_metadata_checksums_correctly(self): - parts = self._get_written_line().decode('ascii').strip().split(' ') - expected_checksum = '{0:08x}'.format( - crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff) - checksum = parts[2] - self.assertEqual(expected_checksum, checksum) - - def test_get_metadata_reads_a_line(self): - client = self._get_client() - client.get('some_key') - self.assertEqual(self.metasource_data_len, self.serial.read.call_count) - - def test_get_metadata_returns_valid_value(self): - client = self._get_client() - value = client.get('some_key') - self.assertEqual(self.metadata_value, value) - - def test_get_metadata_throws_exception_for_incorrect_length(self): - self.response_parts['length'] = 0 - client = self._get_client() - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client.get, 'some_key') - - def test_get_metadata_throws_exception_for_incorrect_crc(self): - self.response_parts['crc'] = 'deadbeef' - client = self._get_client() - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client.get, 'some_key') - - def test_get_metadata_throws_exception_for_request_id_mismatch(self): - self.response_parts['request_id'] = 'deadbeef' - client = self._get_client() - client._checksum = lambda _: self.response_parts['crc'] - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client.get, 'some_key') - - def test_get_metadata_returns_None_if_value_not_found(self): - self.response_parts['payload'] = '' - self.response_parts['command'] = 'NOTFOUND' - self.response_parts['length'] = NOTFOUND_LEN - client = self._get_client() - client._checksum = lambda _: self.response_parts['crc'] - self.assertIsNone(client.get('some_key')) - - def test_negotiate(self): - client = self._get_client() - reader = ShortReader(self.v2_ok) - client.fp.read.side_effect = reader.read - client._negotiate() - self.assertTrue(reader.emptied) - - def test_negotiate_short_response(self): - client = self._get_client() - # chopped '\n' from v2_ok. 
- reader = ShortReader(self.v2_ok[:-1] + b'\0') - client.fp.read.side_effect = reader.read - self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException, - client._negotiate) - self.assertTrue(reader.emptied) - - def test_negotiate_bad_response(self): - client = self._get_client() - reader = ShortReader(b'garbage\n' + self.v2_ok) - client.fp.read.side_effect = reader.read - self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, - client._negotiate) - self.assertEqual(self.v2_ok, client.fp.read()) - - def test_serial_open_transport(self): - client = self._get_serial_client() - reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok) - client.fp.read.side_effect = reader.read - client.open_transport() - self.assertTrue(reader.emptied) - - def test_flush_failure(self): - client = self._get_serial_client() - reader = ShortReader(b'garbage' + b'\0' + self.failure + - self.invalid + self.v2_ok) - client.fp.read.side_effect = reader.read - client.open_transport() - self.assertTrue(reader.emptied) - - def test_flush_many_timeouts(self): - client = self._get_serial_client() - reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok) - client.fp.read.side_effect = reader.read - client.open_transport() - self.assertTrue(reader.emptied) - - def test_list_metadata_returns_list(self): - parts = ['foo', 'bar'] - value = b64e('\n'.join(parts)) - self.response_parts['payload'] = value - self.response_parts['crc'] = '40873553' - self.response_parts['length'] = SUCCESS_LEN + len(value) - client = self._get_client() - self.assertEqual(client.list(), parts) - - def test_list_metadata_returns_empty_list_if_no_customer_metadata(self): - del self.response_parts['payload'] - self.response_parts['length'] = SUCCESS_LEN - 1 - self.response_parts['crc'] = '14e563ba' - client = self._get_client() - self.assertEqual(client.list(), []) - - -class TestNetworkConversion(CiTestCase): - def test_convert_simple(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.102/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '192.168.128.93/22'}], - 'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]} - found = convert_net(SDC_NICS) - self.assertEqual(expected, found) - - def test_convert_simple_alt(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '10.210.1.217/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} - found = convert_net(SDC_NICS_ALT) - self.assertEqual(expected, found) - - def test_convert_simple_dhcp(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'dhcp4'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} - found = convert_net(SDC_NICS_DHCP) - self.assertEqual(expected, found) - - def test_convert_simple_multi_ip(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 
'address': '8.12.42.51/24'}, - {'type': 'static', - 'address': '8.12.42.52/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '10.210.1.217/24'}, - {'type': 'static', - 'address': '10.210.1.151/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} - found = convert_net(SDC_NICS_MIP) - self.assertEqual(expected, found) - - def test_convert_with_dns(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'dhcp4'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}, - {'type': 'nameserver', - 'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]} - found = convert_net( - network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'], - dns_domain="local") - self.assertEqual(expected, found) - - def test_convert_simple_multi_ipv6(self): - expected = { - 'version': 1, - 'config': [ - {'name': 'net0', 'type': 'physical', - 'subnets': [{'type': 'static', 'address': - '2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'}, - {'type': 'static', 'gateway': '8.12.42.1', - 'address': '8.12.42.51/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'}, - {'name': 'net1', 'type': 'physical', - 'subnets': [{'type': 'static', - 'address': '10.210.1.217/24'}], - 'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]} - found = convert_net(SDC_NICS_MIP_IPV6) - self.assertEqual(expected, found) - - def test_convert_simple_both_ipv4_ipv6(self): - expected = { - 'version': 1, - 'config': [ - {'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500, - 'name': 'net0', 'type': 'physical', - 'subnets': [{'address': '2001::10/64', 'gateway': '2001::1', - 'type': 'static'}, - {'address': '8.12.42.51/24', - 'gateway': '8.12.42.1', - 'type': 'static'}, - {'address': '2001::11/64', 'type': 'static'}, - {'address': '8.12.42.52/32', 'type': 'static'}]}, - {'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500, - 'name': 'net1', 'type': 'physical', - 'subnets': [{'address': '10.210.1.217/24', - 'type': 'static'}]}]} - found = convert_net(SDC_NICS_IPV4_IPV6) - self.assertEqual(expected, found) - - def test_gateways_not_on_all_nics(self): - expected = { - 'version': 1, - 'config': [ - {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, - 'name': 'net0', 'type': 'physical', - 'subnets': [{'address': '8.12.42.26/24', - 'gateway': '8.12.42.1', 'type': 'static'}]}, - {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, - 'name': 'net1', 'type': 'physical', - 'subnets': [{'address': '10.210.1.27/24', - 'type': 'static'}]}]} - found = convert_net(SDC_NICS_SINGLE_GATEWAY) - self.assertEqual(expected, found) - - def test_routes_on_all_nics(self): - routes = [ - {'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'}, - {'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}] - expected = { - 'version': 1, - 'config': [ - {'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500, - 'name': 'net0', 'type': 'physical', - 'subnets': [{'address': '8.12.42.26/24', - 'gateway': '8.12.42.1', 'type': 'static', - 'routes': [{'network': '3.0.0.0/8', - 'gateway': '8.12.42.3'}, - {'network': '4.0.0.0/8', - 'gateway': '10.210.1.4'}]}]}, - {'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500, - 'name': 'net1', 'type': 'physical', - 'subnets': [{'address': '10.210.1.27/24', 'type': 'static', - 'routes': [{'network': '3.0.0.0/8', - 
'gateway': '8.12.42.3'}, - {'network': '4.0.0.0/8', - 'gateway': '10.210.1.4'}]}]}]} - found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes) - self.maxDiff = None - self.assertEqual(expected, found) - - -@unittest.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM, - "Only supported on KVM and bhyve guests under SmartOS") -@unittest.skipUnless(os.access(SERIAL_DEVICE, os.W_OK), - "Requires write access to " + SERIAL_DEVICE) -@unittest.skipUnless(HAS_PYSERIAL is True, "pyserial not available") -class TestSerialConcurrency(CiTestCase): - """ - This class tests locking on an actual serial port, and as such can only - be run in a kvm or bhyve guest running on a SmartOS host. A test run on - a metadata socket will not be valid because a metadata socket ensures - there is only one session over a connection. In contrast, in the - absence of proper locking multiple processes opening the same serial - port can corrupt each other's exchanges with the metadata server. - - This takes on the order of 2 to 3 minutes to run. - """ - allowed_subp = ['mdata-get'] - - def setUp(self): - self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop) - self.mdata_proc.start() - super(TestSerialConcurrency, self).setUp() - - def tearDown(self): - # os.kill() rather than mdata_proc.terminate() to avoid console spam. - os.kill(self.mdata_proc.pid, signal.SIGKILL) - self.mdata_proc.join() - super(TestSerialConcurrency, self).tearDown() - - def start_mdata_loop(self): - """ - The mdata-get command is repeatedly run in a separate process so - that it may try to race with metadata operations performed in the - main test process. Use of mdata-get is better than two processes - using the protocol implementation in DataSourceSmartOS because we - are testing to be sure that cloud-init and mdata-get respect each - other's locks. - """ - rcs = list(range(0, 256)) - while True: - subp(['mdata-get', 'sdc:routes'], rcs=rcs) - - def test_all_keys(self): - self.assertIsNotNone(self.mdata_proc.pid) - ds = DataSourceSmartOS - keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()] - keys.extend(ds.SMARTOS_ATTRIB_JSON.values()) - - client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM) - self.assertIsNotNone(client) - - # The behavior that we are testing for was observed with mdata-get running - # 10 times at roughly the same time as cloud-init fetched each key - # once. cloud-init would regularly see failures before making it - # through all keys once. - for _ in range(0, 3): - for key in keys: - # We don't care about the return value, just that it doesn't - # throw any exceptions. - client.get(key) - - self.assertIsNone(self.mdata_proc.exitcode) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_upcloud.py b/tests/unittests/test_datasource/test_upcloud.py deleted file mode 100644 index cec48b4b..00000000 --- a/tests/unittests/test_datasource/test_upcloud.py +++ /dev/null @@ -1,314 +0,0 @@ -# Author: Antti Myyrä -# -# This file is part of cloud-init. See LICENSE file for license information.
- -import json - -from cloudinit import helpers -from cloudinit import settings -from cloudinit import sources -from cloudinit.sources.DataSourceUpCloud import DataSourceUpCloud, \ - DataSourceUpCloudLocal - -from cloudinit.tests.helpers import mock, CiTestCase - -UC_METADATA = json.loads(""" -{ - "cloud_name": "upcloud", - "instance_id": "00322b68-0096-4042-9406-faad61922128", - "hostname": "test.example.com", - "platform": "servers", - "subplatform": "metadata (http://169.254.169.254)", - "public_keys": [ - "ssh-rsa AAAAB.... test1@example.com", - "ssh-rsa AAAAB.... test2@example.com" - ], - "region": "fi-hel2", - "network": { - "interfaces": [ - { - "index": 1, - "ip_addresses": [ - { - "address": "94.237.105.53", - "dhcp": true, - "dns": [ - "94.237.127.9", - "94.237.40.9" - ], - "family": "IPv4", - "floating": false, - "gateway": "94.237.104.1", - "network": "94.237.104.0/22" - }, - { - "address": "94.237.105.50", - "dhcp": false, - "dns": null, - "family": "IPv4", - "floating": true, - "gateway": "", - "network": "94.237.105.50/32" - } - ], - "mac": "3a:d6:ba:4a:36:e7", - "network_id": "031457f4-0f8c-483c-96f2-eccede02909c", - "type": "public" - }, - { - "index": 2, - "ip_addresses": [ - { - "address": "10.6.3.27", - "dhcp": true, - "dns": null, - "family": "IPv4", - "floating": false, - "gateway": "10.6.0.1", - "network": "10.6.0.0/22" - } - ], - "mac": "3a:d6:ba:4a:84:cc", - "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1", - "type": "utility" - }, - { - "index": 3, - "ip_addresses": [ - { - "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7", - "dhcp": true, - "dns": [ - "2a04:3540:53::1", - "2a04:3544:53::1" - ], - "family": "IPv6", - "floating": false, - "gateway": "2a04:3545:1000:720::1", - "network": "2a04:3545:1000:720::/64" - } - ], - "mac": "3a:d6:ba:4a:63:e7", - "network_id": "03000000-0000-4000-8046-000000000000", - "type": "public" - }, - { - "index": 4, - "ip_addresses": [ - { - "address": "172.30.1.10", - "dhcp": true, - "dns": null, - "family": "IPv4", - "floating": false, - "gateway": "172.30.1.1", - "network": "172.30.1.0/24" - } - ], - "mac": "3a:d6:ba:4a:8a:e1", - "network_id": "035a0a4a-7704-4de5-820d-189fc8132714", - "type": "private" - } - ], - "dns": [ - "94.237.127.9", - "94.237.40.9" - ] - }, - "storage": { - "disks": [ - { - "id": "014efb65-223b-4d44-8f0a-c29535b88dcf", - "serial": "014efb65223b4d448f0a", - "size": 10240, - "type": "disk", - "tier": "maxiops" - } - ] - }, - "tags": [], - "user_data": "", - "vendor_data": "" -} -""") - -UC_METADATA["user_data"] = b"""#cloud-config -runcmd: -- [touch, /root/cloud-init-worked ] -""" - -MD_URL = 'http://169.254.169.254/metadata/v1.json' - - -def _mock_dmi(): - return True, "00322b68-0096-4042-9406-faad61922128" - - -class TestUpCloudMetadata(CiTestCase): - """ - Test reading the meta-data - """ - def setUp(self): - super(TestUpCloudMetadata, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self, get_sysinfo=_mock_dmi): - ds = DataSourceUpCloud( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - if get_sysinfo: - ds._get_sysinfo = get_sysinfo - return ds - - @mock.patch('cloudinit.sources.helpers.upcloud.read_sysinfo') - def test_returns_false_not_on_upcloud(self, m_read_sysinfo): - m_read_sysinfo.return_value = (False, None) - ds = self.get_ds(get_sysinfo=None) - self.assertEqual(False, ds.get_data()) - self.assertTrue(m_read_sysinfo.called) - - @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') - def test_metadata(self, mock_readmd): - mock_readmd.return_value = 
UC_METADATA.copy() - - ds = self.get_ds() - ds.perform_dhcp_setup = False - - ret = ds.get_data() - self.assertTrue(ret) - - self.assertTrue(mock_readmd.called) - - self.assertEqual(UC_METADATA.get('user_data'), ds.get_userdata_raw()) - self.assertEqual(UC_METADATA.get('vendor_data'), - ds.get_vendordata_raw()) - self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) - self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) - self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) - - self.assertEqual(UC_METADATA.get('public_keys'), - ds.get_public_ssh_keys()) - self.assertIsInstance(ds.get_public_ssh_keys(), list) - - -class TestUpCloudNetworkSetup(CiTestCase): - """ - Test reading the meta-data in a networked context - """ - - def setUp(self): - super(TestUpCloudNetworkSetup, self).setUp() - self.tmp = self.tmp_dir() - - def get_ds(self, get_sysinfo=_mock_dmi): - ds = DataSourceUpCloudLocal( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - if get_sysinfo: - ds._get_sysinfo = get_sysinfo - return ds - - @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') - @mock.patch('cloudinit.net.find_fallback_nic') - @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') - @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') - def test_network_configured_metadata(self, m_net, m_dhcp, - m_fallback_nic, mock_readmd): - mock_readmd.return_value = UC_METADATA.copy() - - m_fallback_nic.return_value = 'eth1' - m_dhcp.return_value = [{ - 'interface': 'eth1', 'fixed-address': '10.6.3.27', - 'routers': '10.6.0.1', 'subnet-mask': '22', - 'broadcast-address': '10.6.3.255'} - ] - - ds = self.get_ds() - - ret = ds.get_data() - self.assertTrue(ret) - - self.assertTrue(m_dhcp.called) - m_dhcp.assert_called_with('eth1', None) - - m_net.assert_called_once_with( - broadcast='10.6.3.255', interface='eth1', - ip='10.6.3.27', prefix_or_mask='22', - router='10.6.0.1', static_routes=None - ) - - self.assertTrue(mock_readmd.called) - - self.assertEqual(UC_METADATA.get('region'), ds.availability_zone) - self.assertEqual(UC_METADATA.get('instance_id'), ds.get_instance_id()) - self.assertEqual(UC_METADATA.get('cloud_name'), ds.cloud_name) - - @mock.patch('cloudinit.sources.helpers.upcloud.read_metadata') - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_network_configuration(self, m_get_by_mac, mock_readmd): - mock_readmd.return_value = UC_METADATA.copy() - - raw_ifaces = UC_METADATA.get('network').get('interfaces') - self.assertEqual(4, len(raw_ifaces)) - - m_get_by_mac.return_value = { - raw_ifaces[0].get('mac'): 'eth0', - raw_ifaces[1].get('mac'): 'eth1', - raw_ifaces[2].get('mac'): 'eth2', - raw_ifaces[3].get('mac'): 'eth3', - } - - ds = self.get_ds() - ds.perform_dhcp_setup = False - - ret = ds.get_data() - self.assertTrue(ret) - - self.assertTrue(mock_readmd.called) - - netcfg = ds.network_config - - self.assertEqual(1, netcfg.get('version')) - - config = netcfg.get('config') - self.assertIsInstance(config, list) - self.assertEqual(5, len(config)) - self.assertEqual('physical', config[3].get('type')) - - self.assertEqual(raw_ifaces[2].get('mac'), config[2] - .get('mac_address')) - self.assertEqual(1, len(config[2].get('subnets'))) - self.assertEqual('ipv6_dhcpv6-stateless', config[2].get('subnets')[0] - .get('type')) - - self.assertEqual(2, len(config[0].get('subnets'))) - self.assertEqual('static', config[0].get('subnets')[1].get('type')) - - dns = config[4] - self.assertEqual('nameserver', dns.get('type')) - self.assertEqual(2,
len(dns.get('address'))) - self.assertEqual( - UC_METADATA.get('network').get('dns')[1], - dns.get('address')[1] - ) - - -class TestUpCloudDatasourceLoading(CiTestCase): - def test_get_datasource_list_returns_in_local(self): - deps = (sources.DEP_FILESYSTEM, ) - ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) - self.assertEqual(ds_list, - [DataSourceUpCloudLocal]) - - def test_get_datasource_list_returns_in_normal(self): - deps = (sources.DEP_FILESYSTEM, sources.DEP_NETWORK) - ds_list = sources.DataSourceUpCloud.get_datasource_list(deps) - self.assertEqual(ds_list, - [DataSourceUpCloud]) - - def test_list_sources_finds_ds(self): - found = sources.list_sources( - ['UpCloud'], (sources.DEP_FILESYSTEM, sources.DEP_NETWORK), - ['cloudinit.sources']) - self.assertEqual([DataSourceUpCloud], - found) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/test_datasource/test_vmware.py deleted file mode 100644 index 52f910b5..00000000 --- a/tests/unittests/test_datasource/test_vmware.py +++ /dev/null @@ -1,391 +0,0 @@ -# Copyright (c) 2021 VMware, Inc. All Rights Reserved. -# -# Authors: Andrew Kutz -# -# This file is part of cloud-init. See LICENSE file for license information. - -import base64 -import gzip -import os - -import pytest - -from cloudinit import dmi, helpers, safeyaml -from cloudinit import settings -from cloudinit.sources import DataSourceVMware -from cloudinit.tests.helpers import ( - mock, - CiTestCase, - FilesystemMockingTestCase, - populate_dir, -) - - -PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" -PRODUCT_NAME = "VMware7,1" -PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" -REROOT_FILES = { - DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, - PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, -} - -VMW_MULTIPLE_KEYS = [ - "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com", - "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com", -] -VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com" - -VMW_METADATA_YAML = """instance-id: cloud-vm -local-hostname: cloud-vm -network: - version: 2 - ethernets: - nics: - match: - name: ens* - dhcp4: yes -""" - -VMW_USERDATA_YAML = """## template: jinja -#cloud-config -users: -- default -""" - -VMW_VENDORDATA_YAML = """## template: jinja -#cloud-config -runcmd: -- echo "Hello, world." -""" - - -@pytest.yield_fixture(autouse=True) -def common_patches(): - with mock.patch('cloudinit.util.platform.platform', return_value='Linux'): - with mock.patch.multiple( - 'cloudinit.dmi', - is_container=mock.Mock(return_value=False), - is_FreeBSD=mock.Mock(return_value=False) - ): - yield - - -class TestDataSourceVMware(CiTestCase): - """ - Test common functionality that is not transport specific. - """ - - def setUp(self): - super(TestDataSourceVMware, self).setUp() - self.tmp = self.tmp_dir() - - def test_no_data_access_method(self): - ds = get_ds(self.tmp) - ds.vmware_rpctool = None - ret = ds.get_data() - self.assertFalse(ret) - - def test_get_host_info(self): - host_info = DataSourceVMware.get_host_info() - self.assertTrue(host_info) - self.assertTrue(host_info["hostname"]) - self.assertTrue(host_info["local-hostname"]) - self.assertTrue(host_info["local_hostname"]) - self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4]) - - -class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase): - """ - Test the envvar transport. 
- """ - - def setUp(self): - super(TestDataSourceVMwareEnvVars, self).setUp() - self.tmp = self.tmp_dir() - os.environ[DataSourceVMware.VMX_GUESTINFO] = "1" - self.create_system_files() - - def tearDown(self): - del os.environ[DataSourceVMware.VMX_GUESTINFO] - return super(TestDataSourceVMwareEnvVars, self).tearDown() - - def create_system_files(self): - rootd = self.tmp_dir() - populate_dir( - rootd, - { - DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, - }, - ) - self.assertTrue(self.reRoot(rootd)) - - def assert_get_data_ok(self, m_fn, m_fn_call_count=6): - ds = get_ds(self.tmp) - ds.vmware_rpctool = None - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(m_fn_call_count, m_fn.call_count) - self.assertEqual( - ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR - ) - return ds - - def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): - ds = self.assert_get_data_ok(m_fn, m_fn_call_count) - assert_metadata(self, ds, metadata) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_subplatform(self, m_fn): - m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] - ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) - self.assertEqual( - ds.subplatform, - "%s (%s)" - % ( - DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR, - DataSourceVMware.get_guestinfo_envvar_key_name("metadata"), - ), - ) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_only(self, m_fn): - m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_userdata_only(self, m_fn): - m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_vendordata_only(self, m_fn): - m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_base64(self, m_fn): - data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) - m_fn.side_effect = [data, "base64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_b64(self, m_fn): - data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) - m_fn.side_effect = [data, "b64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_gzip_base64(self, m_fn): - data = VMW_METADATA_YAML.encode("utf-8") - data = gzip.compress(data) - data = base64.b64encode(data) - m_fn.side_effect = [data, "gzip+base64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_get_data_metadata_gz_b64(self, m_fn): - data = VMW_METADATA_YAML.encode("utf-8") - data = gzip.compress(data) - data = base64.b64encode(data) - m_fn.side_effect = [data, "gz+b64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_metadata_single_ssh_key(self, m_fn): - metadata = 
DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) - metadata["public_keys"] = VMW_SINGLE_KEY - metadata_yaml = safeyaml.dumps(metadata) - m_fn.side_effect = [metadata_yaml, "", "", ""] - self.assert_metadata(metadata, m_fn, m_fn_call_count=4) - - @mock.patch( - "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" - ) - def test_metadata_multiple_ssh_keys(self, m_fn): - metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) - metadata["public_keys"] = VMW_MULTIPLE_KEYS - metadata_yaml = safeyaml.dumps(metadata) - m_fn.side_effect = [metadata_yaml, "", "", ""] - self.assert_metadata(metadata, m_fn, m_fn_call_count=4) - - -class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase): - """ - Test the guestinfo transport on a VMware platform. - """ - - def setUp(self): - super(TestDataSourceVMwareGuestInfo, self).setUp() - self.tmp = self.tmp_dir() - self.create_system_files() - - def create_system_files(self): - rootd = self.tmp_dir() - populate_dir( - rootd, - { - DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, - PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, - }, - ) - self.assertTrue(self.reRoot(rootd)) - - def assert_get_data_ok(self, m_fn, m_fn_call_count=6): - ds = get_ds(self.tmp) - ds.vmware_rpctool = "vmware-rpctool" - ret = ds.get_data() - self.assertTrue(ret) - self.assertEqual(m_fn_call_count, m_fn.call_count) - self.assertEqual( - ds.data_access_method, - DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, - ) - return ds - - def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): - ds = self.assert_get_data_ok(m_fn, m_fn_call_count) - assert_metadata(self, ds, metadata) - - def test_ds_valid_on_vmware_platform(self): - system_type = dmi.read_dmi_data("system-product-name") - self.assertEqual(system_type, PRODUCT_NAME) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_subplatform(self, m_fn): - m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] - ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) - self.assertEqual( - ds.subplatform, - "%s (%s)" - % ( - DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, - DataSourceVMware.get_guestinfo_key_name("metadata"), - ), - ) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_userdata_only(self, m_fn): - m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_vendordata_only(self, m_fn): - m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_metadata_single_ssh_key(self, m_fn): - metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) - metadata["public_keys"] = VMW_SINGLE_KEY - metadata_yaml = safeyaml.dumps(metadata) - m_fn.side_effect = [metadata_yaml, "", "", ""] - self.assert_metadata(metadata, m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_metadata_multiple_ssh_keys(self, m_fn): - metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) - metadata["public_keys"] = VMW_MULTIPLE_KEYS - metadata_yaml = safeyaml.dumps(metadata) - m_fn.side_effect = [metadata_yaml, "", "", ""] - self.assert_metadata(metadata, m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_metadata_base64(self, m_fn): - data = 
base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) - m_fn.side_effect = [data, "base64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_metadata_b64(self, m_fn): - data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) - m_fn.side_effect = [data, "b64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_metadata_gzip_base64(self, m_fn): - data = VMW_METADATA_YAML.encode("utf-8") - data = gzip.compress(data) - data = base64.b64encode(data) - m_fn.side_effect = [data, "gzip+base64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_get_data_metadata_gz_b64(self, m_fn): - data = VMW_METADATA_YAML.encode("utf-8") - data = gzip.compress(data) - data = base64.b64encode(data) - m_fn.side_effect = [data, "gz+b64", "", ""] - self.assert_get_data_ok(m_fn, m_fn_call_count=4) - - -class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): - """ - Test the guestinfo transport on a non-VMware platform. - """ - - def setUp(self): - super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp() - self.tmp = self.tmp_dir() - self.create_system_files() - - def create_system_files(self): - rootd = self.tmp_dir() - populate_dir( - rootd, - { - DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, - }, - ) - self.assertTrue(self.reRoot(rootd)) - - @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") - def test_ds_invalid_on_non_vmware_platform(self, m_fn): - system_type = dmi.read_dmi_data("system-product-name") - self.assertEqual(system_type, None) - - m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] - ds = get_ds(self.tmp) - ds.vmware_rpctool = "vmware-rpctool" - ret = ds.get_data() - self.assertFalse(ret) - - -def assert_metadata(test_obj, ds, metadata): - test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) - test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname()) - - expected_public_keys = metadata.get("public_keys") - if not isinstance(expected_public_keys, list): - expected_public_keys = [expected_public_keys] - - test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys()) - test_obj.assertIsInstance(ds.get_public_ssh_keys(), list) - - -def get_ds(temp_dir): - ds = DataSourceVMware.DataSourceVMware( - settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir}) - ) - ds.vmware_rpctool = "vmware-rpctool" - return ds - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_vultr.py b/tests/unittests/test_datasource/test_vultr.py deleted file mode 100644 index 63235009..00000000 --- a/tests/unittests/test_datasource/test_vultr.py +++ /dev/null @@ -1,337 +0,0 @@ -# Author: Eric Benner -# -# This file is part of cloud-init. See LICENSE file for license information. 
- -# Vultr Metadata API: -# https://www.vultr.com/metadata/ - -import json - -from cloudinit import helpers -from cloudinit import settings -from cloudinit.sources import DataSourceVultr -from cloudinit.sources.helpers import vultr - -from cloudinit.tests.helpers import mock, CiTestCase - -# Vultr metadata test data -VULTR_V1_1 = { - 'bgp': { - 'ipv4': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - }, - 'ipv6': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - } - }, - 'hostname': 'CLOUDINIT_1', - 'instanceid': '42506325', - 'interfaces': [ - { - 'ipv4': { - 'additional': [ - ], - 'address': '108.61.89.242', - 'gateway': '108.61.89.1', - 'netmask': '255.255.255.0' - }, - 'ipv6': { - 'additional': [ - ], - 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465', - 'network': '2001:19f0:5:56c2::', - 'prefix': '64' - }, - 'mac': '56:00:03:15:c4:65', - 'network-type': 'public' - } - ], - 'public-keys': [ - 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' - ], - 'region': { - 'regioncode': 'EWR' - }, - 'user-defined': [ - ], - 'startup-script': 'echo No configured startup script', - 'raid1-script': '', - 'user-data': [ - ], - 'vendor-data': [ - { - 'package_upgrade': 'true', - 'disable_root': 0, - 'ssh_pwauth': 1, - 'chpasswd': { - 'expire': False, - 'list': [ - 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/' - ] - }, - 'system_info': { - 'default_user': { - 'name': 'root' - } - } - } - ] -} - -VULTR_V1_2 = { - 'bgp': { - 'ipv4': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - }, - 'ipv6': { - 'my-address': '', - 'my-asn': '', - 'peer-address': '', - 'peer-asn': '' - } - }, - 'hostname': 'CLOUDINIT_2', - 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f', - 'instanceid': '42872224', - 'interfaces': [ - { - 'ipv4': { - 'additional': [ - ], - 'address':'45.76.7.171', - 'gateway':'45.76.6.1', - 'netmask':'255.255.254.0' - }, - 'ipv6':{ - 'additional': [ - ], - 'address':'2001:19f0:5:28a7:5400:03ff:fe1b:4eca', - 'network':'2001:19f0:5:28a7::', - 'prefix':'64' - }, - 'mac':'56:00:03:1b:4e:ca', - 'network-type':'public' - }, - { - 'ipv4': { - 'additional': [ - ], - 'address':'10.1.112.3', - 'gateway':'', - 'netmask':'255.255.240.0' - }, - 'ipv6':{ - 'additional': [ - ], - 'network':'', - 'prefix':'' - }, - 'mac':'5a:00:03:1b:4e:ca', - 'network-type':'private', - 'network-v2-id':'fbbe2b5b-b986-4396-87f5-7246660ccb64', - 'networkid':'net5e7155329d730' - } - ], - 'public-keys': [ - 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' - ], - 'region': { - 'regioncode': 'EWR' - }, - 'user-defined': [ - ], - 'startup-script': 'echo No configured startup script', - 'user-data': [ - ], - - 'vendor-data': [ - { - 'package_upgrade': 'true', - 'disable_root': 0, - 'ssh_pwauth': 1, - 'chpasswd': { - 'expire': False, - 'list': [ - 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' - ] - }, - 'system_info': { - 'default_user': { - 'name': 'root' - } - } - } - ] -} - -SSH_KEYS_1 = [ - "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key" -] - -# Expected generated objects - -# Expected config -EXPECTED_VULTR_CONFIG = { - 'package_upgrade': 'true', - 'disable_root': 0, - 'ssh_pwauth': 1, - 'chpasswd': { - 'expire': False, - 'list': [ - 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' - ] - }, - 'system_info': { - 'default_user': { - 'name': 'root' - } - } -} - -# Expected network config object from generator -EXPECTED_VULTR_NETWORK_1 = { - 'version': 1, - 'config': [ - { - 'type': 'nameserver', - 'address': ['108.61.10.10'] - }, - { - 'name': 
'eth0', - 'type': 'physical', - 'mac_address': '56:00:03:15:c4:65', - 'accept-ra': 1, - 'subnets': [ - {'type': 'dhcp', 'control': 'auto'}, - {'type': 'ipv6_slaac', 'control': 'auto'} - ], - } - ] -} - -EXPECTED_VULTR_NETWORK_2 = { - 'version': 1, - 'config': [ - { - 'type': 'nameserver', - 'address': ['108.61.10.10'] - }, - { - 'name': 'eth0', - 'type': 'physical', - 'mac_address': '56:00:03:1b:4e:ca', - 'accept-ra': 1, - 'subnets': [ - {'type': 'dhcp', 'control': 'auto'}, - {'type': 'ipv6_slaac', 'control': 'auto'} - ], - }, - { - 'name': 'eth1', - 'type': 'physical', - 'mac_address': '5a:00:03:1b:4e:ca', - 'subnets': [ - { - "type": "static", - "control": "auto", - "address": "10.1.112.3", - "netmask": "255.255.240.0" - } - ], - } - ] -} - - -INTERFACE_MAP = { - '56:00:03:15:c4:65': 'eth0', - '56:00:03:1b:4e:ca': 'eth0', - '5a:00:03:1b:4e:ca': 'eth1' -} - - -class TestDataSourceVultr(CiTestCase): - def setUp(self): - super(TestDataSourceVultr, self).setUp() - - # Stored as a dict to make it easier to maintain - raw1 = json.dumps(VULTR_V1_1['vendor-data'][0]) - raw2 = json.dumps(VULTR_V1_2['vendor-data'][0]) - - # Make expected format - VULTR_V1_1['vendor-data'] = [raw1] - VULTR_V1_2['vendor-data'] = [raw2] - - self.tmp = self.tmp_dir() - - # Test the datasource itself - @mock.patch('cloudinit.net.get_interfaces_by_mac') - @mock.patch('cloudinit.sources.helpers.vultr.is_vultr') - @mock.patch('cloudinit.sources.helpers.vultr.get_metadata') - def test_datasource(self, - mock_getmeta, - mock_isvultr, - mock_netmap): - mock_getmeta.return_value = VULTR_V1_2 - mock_isvultr.return_value = True - mock_netmap.return_value = INTERFACE_MAP - - source = DataSourceVultr.DataSourceVultr( - settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) - - # Test for failure - self.assertEqual(True, source._get_data()) - - # Test instance id - self.assertEqual("42872224", source.metadata['instanceid']) - - # Test hostname - self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname']) - - # Test ssh keys - self.assertEqual(SSH_KEYS_1, source.metadata['public-keys']) - - # Test vendor data generation - orig_val = self.maxDiff - self.maxDiff = None - - vendordata = source.vendordata_raw - - # Test vendor config - self.assertEqual( - EXPECTED_VULTR_CONFIG, - json.loads(vendordata[0].replace("#cloud-config", ""))) - - self.maxDiff = orig_val - - # Test network config generation - self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) - - # Test network config generation - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_network_config(self, mock_netmap): - mock_netmap.return_value = INTERFACE_MAP - interf = VULTR_V1_1['interfaces'] - - self.assertEqual(EXPECTED_VULTR_NETWORK_1, - vultr.generate_network_config(interf)) - - # Test Private Networking config generation - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_private_network_config(self, mock_netmap): - mock_netmap.return_value = INTERFACE_MAP - interf = VULTR_V1_2['interfaces'] - - self.assertEqual(EXPECTED_VULTR_NETWORK_2, - vultr.generate_network_config(interf)) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_dhclient_hook.py b/tests/unittests/test_dhclient_hook.py new file mode 100644 index 00000000..14549111 --- /dev/null +++ b/tests/unittests/test_dhclient_hook.py @@ -0,0 +1,105 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Tests for cloudinit.dhclient_hook.""" + +from cloudinit import dhclient_hook as dhc +from tests.unittests.helpers import CiTestCase, dir2dict, populate_dir + +import argparse +import json +import os +from unittest import mock + + +class TestDhclientHook(CiTestCase): + + ex_env = { + 'interface': 'eth0', + 'new_dhcp_lease_time': '3600', + 'new_host_name': 'x1', + 'new_ip_address': '10.145.210.163', + 'new_subnet_mask': '255.255.255.0', + 'old_host_name': 'x1', + 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', + 'pid': '614', + 'reason': 'BOUND', + } + + # some older versions of dhclient put the same content, + # but in upper case with DHCP4_ instead of new_ + ex_env_dhcp4 = { + 'REASON': 'BOUND', + 'DHCP4_dhcp_lease_time': '3600', + 'DHCP4_host_name': 'x1', + 'DHCP4_ip_address': '10.145.210.163', + 'DHCP4_subnet_mask': '255.255.255.0', + 'INTERFACE': 'eth0', + 'PATH': '/usr/sbin:/usr/bin:/sbin:/bin', + 'pid': '614', + } + + expected = { + 'dhcp_lease_time': '3600', + 'host_name': 'x1', + 'ip_address': '10.145.210.163', + 'subnet_mask': '255.255.255.0'} + + def setUp(self): + super(TestDhclientHook, self).setUp() + self.tmp = self.tmp_dir() + + def test_handle_args(self): + """quick test of call to handle_args.""" + nic = 'eth0' + args = argparse.Namespace(event=dhc.UP, interface=nic) + with mock.patch.dict("os.environ", clear=True, values=self.ex_env): + dhc.handle_args(dhc.NAME, args, data_d=self.tmp) + found = dir2dict(self.tmp + os.path.sep) + self.assertEqual([nic + ".json"], list(found.keys())) + self.assertEqual(self.expected, json.loads(found[nic + ".json"])) + + def test_run_hook_up_creates_dir(self): + """If dir does not exist, run_hook should create it.""" + subd = self.tmp_path("subdir", self.tmp) + nic = 'eth1' + dhc.run_hook(nic, 'up', data_d=subd, env=self.ex_env) + self.assertEqual( + set([nic + ".json"]), set(dir2dict(subd + os.path.sep))) + + def test_run_hook_up(self): + """Test expected use of run_hook_up.""" + nic = 'eth0' + dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env) + found = dir2dict(self.tmp + os.path.sep) + self.assertEqual([nic + ".json"], list(found.keys())) + self.assertEqual(self.expected, json.loads(found[nic + ".json"])) + + def test_run_hook_up_dhcp4_prefix(self): + """Test run_hook filters correctly with older DHCP4_ data.""" + nic = 'eth0' + dhc.run_hook(nic, 'up', data_d=self.tmp, env=self.ex_env_dhcp4) + found = dir2dict(self.tmp + os.path.sep) + self.assertEqual([nic + ".json"], list(found.keys())) + self.assertEqual(self.expected, json.loads(found[nic + ".json"])) + + def test_run_hook_down_deletes(self): + """down should delete the created json file.""" + nic = 'eth1' + populate_dir( + self.tmp, {nic + ".json": "{'abcd'}", 'myfile.txt': 'text'}) + dhc.run_hook(nic, 'down', data_d=self.tmp, env={'old_host_name': 'x1'}) + self.assertEqual( + set(['myfile.txt']), + set(dir2dict(self.tmp + os.path.sep))) + + def test_get_parser(self): + """Smoke test creation of get_parser.""" + # cloud-init main uses 'action'. + event, interface = (dhc.UP, 'mynic0') + self.assertEqual( + argparse.Namespace(event=event, interface=interface, + action=(dhc.NAME, dhc.handle_args)), + dhc.get_parser().parse_args([event, interface])) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/test_distros/__init__.py deleted file mode 100644 index 5394aa56..00000000 --- a/tests/unittests/test_distros/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
-import copy - -from cloudinit import distros -from cloudinit import helpers -from cloudinit import settings - - -def _get_distro(dtype, system_info=None): - """Return a Distro instance for distro 'dtype'. - - system_info is in the format of CFG_BUILTIN['system_info']. - - example: _get_distro("debian") - """ - if system_info is None: - system_info = copy.deepcopy(settings.CFG_BUILTIN['system_info']) - system_info['distro'] = dtype - paths = helpers.Paths(system_info['paths']) - distro_cls = distros.fetch(dtype) - return distro_cls(dtype, system_info, paths) diff --git a/tests/unittests/test_distros/test_arch.py b/tests/unittests/test_distros/test_arch.py deleted file mode 100644 index a95ba3b5..00000000 --- a/tests/unittests/test_distros/test_arch.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.distros.arch import _render_network -from cloudinit import util - -from cloudinit.tests.helpers import (CiTestCase, dir2dict) - -from . import _get_distro - - -class TestArch(CiTestCase): - - def test_get_distro(self): - distro = _get_distro("arch") - hostname = "myhostname" - hostfile = self.tmp_path("hostfile") - distro._write_hostname(hostname, hostfile) - self.assertEqual(hostname + "\n", util.load_file(hostfile)) - - -class TestRenderNetwork(CiTestCase): - def test_basic_static(self): - """Just the most basic static config. - - note 'lo' should not be rendered as an interface.""" - entries = {'eth0': {'auto': True, - 'dns-nameservers': ['8.8.8.8'], - 'bootproto': 'static', - 'address': '10.0.0.2', - 'gateway': '10.0.0.1', - 'netmask': '255.255.255.0'}, - 'lo': {'auto': True}} - target = self.tmp_dir() - devs = _render_network(entries, target=target) - files = dir2dict(target, prefix=target) - self.assertEqual(['eth0'], devs) - self.assertEqual( - {'/etc/netctl/eth0': '\n'.join([ - "Address=10.0.0.2/255.255.255.0", - "Connection=ethernet", - "DNS=('8.8.8.8')", - "Gateway=10.0.0.1", - "IP=static", - "Interface=eth0", ""]), - '/etc/resolv.conf': 'nameserver 8.8.8.8\n'}, files) diff --git a/tests/unittests/test_distros/test_bsd_utils.py b/tests/unittests/test_distros/test_bsd_utils.py deleted file mode 100644 index 3a68f2a9..00000000 --- a/tests/unittests/test_distros/test_bsd_utils.py +++ /dev/null @@ -1,67 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
- -import cloudinit.distros.bsd_utils as bsd_utils - -from cloudinit.tests.helpers import (CiTestCase, ExitStack, mock) - -RC_FILE = """ -if something; then - do something here -fi -hostname={hostname} -""" - - -class TestBsdUtils(CiTestCase): - - def setUp(self): - super().setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.load_file = patches.enter_context( - mock.patch.object(bsd_utils.util, 'load_file')) - - self.write_file = patches.enter_context( - mock.patch.object(bsd_utils.util, 'write_file')) - - def test_get_rc_config_value(self): - self.load_file.return_value = 'hostname=foo\n' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') - self.load_file.assert_called_with('/etc/rc.conf') - - self.load_file.return_value = 'hostname=foo' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') - - self.load_file.return_value = 'hostname="foo"' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') - - self.load_file.return_value = "hostname='foo'" - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), 'foo') - - self.load_file.return_value = 'hostname=\'foo"' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "'foo\"") - - self.load_file.return_value = '' - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), None) - - self.load_file.return_value = RC_FILE.format(hostname='foo') - self.assertEqual(bsd_utils.get_rc_config_value('hostname'), "foo") - - def test_set_rc_config_value_unchanged(self): - # bsd_utils.set_rc_config_value('hostname', 'foo') - # self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n') - - self.load_file.return_value = RC_FILE.format(hostname='foo') - self.write_file.assert_not_called() - - def test_set_rc_config_value(self): - bsd_utils.set_rc_config_value('hostname', 'foo') - self.write_file.assert_called_with('/etc/rc.conf', 'hostname=foo\n') - - self.load_file.return_value = RC_FILE.format(hostname='foo') - bsd_utils.set_rc_config_value('hostname', 'bar') - self.write_file.assert_called_with( - '/etc/rc.conf', - RC_FILE.format(hostname='bar') - ) diff --git a/tests/unittests/test_distros/test_create_users.py b/tests/unittests/test_distros/test_create_users.py deleted file mode 100644 index 685f08ba..00000000 --- a/tests/unittests/test_distros/test_create_users.py +++ /dev/null @@ -1,236 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import re - -from cloudinit import distros -from cloudinit import ssh_util -from cloudinit.tests.helpers import (CiTestCase, mock) -from tests.unittests.util import abstract_to_concrete - - -@mock.patch("cloudinit.distros.util.system_is_snappy", return_value=False) -@mock.patch("cloudinit.distros.subp.subp") -class TestCreateUser(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestCreateUser, self).setUp() - self.dist = abstract_to_concrete(distros.Distro)( - name='test', cfg=None, paths=None - ) - - def _useradd2call(self, args): - # return a mock call for the useradd command in args - # with expected 'logstring'. 
- args = ['useradd'] + args - logcmd = [a for a in args] - for i in range(len(args)): - if args[i] in ('--password',): - logcmd[i + 1] = 'REDACTED' - return mock.call(args, logstring=logcmd) - - def test_basic(self, m_subp, m_is_snappy): - user = 'foouser' - self.dist.create_user(user) - self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) - - def test_no_home(self, m_subp, m_is_snappy): - user = 'foouser' - self.dist.create_user(user, no_create_home=True) - self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '-M']), - mock.call(['passwd', '-l', user])]) - - def test_system_user(self, m_subp, m_is_snappy): - # system user should have no home and get --system - user = 'foouser' - self.dist.create_user(user, system=True) - self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '--system', '-M']), - mock.call(['passwd', '-l', user])]) - - def test_explicit_no_home_false(self, m_subp, m_is_snappy): - user = 'foouser' - self.dist.create_user(user, no_create_home=False) - self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) - - def test_unlocked(self, m_subp, m_is_snappy): - user = 'foouser' - self.dist.create_user(user, lock_passwd=False) - self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '-m'])]) - - def test_set_password(self, m_subp, m_is_snappy): - user = 'foouser' - password = 'passfoo' - self.dist.create_user(user, passwd=password) - self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '--password', password, '-m']), - mock.call(['passwd', '-l', user])]) - - @mock.patch("cloudinit.distros.util.is_group") - def test_group_added(self, m_is_group, m_subp, m_is_snappy): - m_is_group.return_value = False - user = 'foouser' - self.dist.create_user(user, groups=['group1']) - expected = [ - mock.call(['groupadd', 'group1']), - self._useradd2call([user, '--groups', 'group1', '-m']), - mock.call(['passwd', '-l', user])] - self.assertEqual(m_subp.call_args_list, expected) - - @mock.patch("cloudinit.distros.util.is_group") - def test_only_new_group_added(self, m_is_group, m_subp, m_is_snappy): - ex_groups = ['existing_group'] - groups = ['group1', ex_groups[0]] - m_is_group.side_effect = lambda m: m in ex_groups - user = 'foouser' - self.dist.create_user(user, groups=groups) - expected = [ - mock.call(['groupadd', 'group1']), - self._useradd2call([user, '--groups', ','.join(groups), '-m']), - mock.call(['passwd', '-l', user])] - self.assertEqual(m_subp.call_args_list, expected) - - @mock.patch("cloudinit.distros.util.is_group") - def test_create_groups_with_whitespace_string( - self, m_is_group, m_subp, m_is_snappy): - # groups supported as a comma-delimited string even with white space - m_is_group.return_value = False - user = 'foouser' - self.dist.create_user(user, groups='group1, group2') - expected = [ - mock.call(['groupadd', 'group1']), - mock.call(['groupadd', 'group2']), - self._useradd2call([user, '--groups', 'group1,group2', '-m']), - mock.call(['passwd', '-l', user])] - self.assertEqual(m_subp.call_args_list, expected) - - def test_explicit_sudo_false(self, m_subp, m_is_snappy): - user = 'foouser' - self.dist.create_user(user, sudo=False) - self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) - - @mock.patch('cloudinit.ssh_util.setup_user_keys') - def test_setup_ssh_authorized_keys_with_string( - self,
m_setup_user_keys, m_subp, m_is_snappy): - """ssh_authorized_keys allows string and calls setup_user_keys.""" - user = 'foouser' - self.dist.create_user(user, ssh_authorized_keys='mykey') - self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) - m_setup_user_keys.assert_called_once_with(set(['mykey']), user) - - @mock.patch('cloudinit.ssh_util.setup_user_keys') - def test_setup_ssh_authorized_keys_with_list( - self, m_setup_user_keys, m_subp, m_is_snappy): - """ssh_authorized_keys allows lists and calls setup_user_keys.""" - user = 'foouser' - self.dist.create_user(user, ssh_authorized_keys=['key1', 'key2']) - self.assertEqual( - m_subp.call_args_list, - [self._useradd2call([user, '-m']), - mock.call(['passwd', '-l', user])]) - m_setup_user_keys.assert_called_once_with(set(['key1', 'key2']), user) - - @mock.patch('cloudinit.ssh_util.setup_user_keys') - def test_setup_ssh_authorized_keys_with_integer( - self, m_setup_user_keys, m_subp, m_is_snappy): - """ssh_authorized_keys warns on non-iterable/string type.""" - user = 'foouser' - self.dist.create_user(user, ssh_authorized_keys=-1) - m_setup_user_keys.assert_called_once_with(set([]), user) - match = re.match( - r'.*WARNING: Invalid type \'<(type|class) \'int\'>\' detected for' - ' \'ssh_authorized_keys\'.*', - self.logs.getvalue(), - re.DOTALL) - self.assertIsNotNone( - match, 'Missing ssh_authorized_keys invalid type warning') - - @mock.patch('cloudinit.ssh_util.setup_user_keys') - def test_create_user_with_ssh_redirect_user_no_cloud_keys( - self, m_setup_user_keys, m_subp, m_is_snappy): - """Log a warning when trying to redirect a user with no cloud ssh keys.""" - user = 'foouser' - self.dist.create_user(user, ssh_redirect_user='someuser') - self.assertIn( - 'WARNING: Unable to disable SSH logins for foouser given ' - 'ssh_redirect_user: someuser.
No cloud public-keys present.\n', - self.logs.getvalue()) - m_setup_user_keys.assert_not_called() - - @mock.patch('cloudinit.ssh_util.setup_user_keys') - def test_create_user_with_ssh_redirect_user_with_cloud_keys( - self, m_setup_user_keys, m_subp, m_is_snappy): - """Disable ssh when ssh_redirect_user and cloud ssh keys are set.""" - user = 'foouser' - self.dist.create_user( - user, ssh_redirect_user='someuser', cloud_public_ssh_keys=['key1']) - disable_prefix = ssh_util.DISABLE_USER_OPTS - disable_prefix = disable_prefix.replace('$USER', 'someuser') - disable_prefix = disable_prefix.replace('$DISABLE_USER', user) - m_setup_user_keys.assert_called_once_with( - set(['key1']), 'foouser', options=disable_prefix) - - @mock.patch('cloudinit.ssh_util.setup_user_keys') - def test_create_user_with_ssh_redirect_user_does_not_disable_auth_keys( - self, m_setup_user_keys, m_subp, m_is_snappy): - """Do not disable ssh_authorized_keys when ssh_redirect_user is set.""" - user = 'foouser' - self.dist.create_user( - user, ssh_authorized_keys='auth1', ssh_redirect_user='someuser', - cloud_public_ssh_keys=['key1']) - disable_prefix = ssh_util.DISABLE_USER_OPTS - disable_prefix = disable_prefix.replace('$USER', 'someuser') - disable_prefix = disable_prefix.replace('$DISABLE_USER', user) - self.assertEqual( - m_setup_user_keys.call_args_list, - [mock.call(set(['auth1']), user), # not disabled - mock.call(set(['key1']), 'foouser', options=disable_prefix)]) - - @mock.patch("cloudinit.distros.subp.which") - def test_lock_with_usermod_if_no_passwd(self, m_which, m_subp, - m_is_snappy): - """Lock uses usermod --lock if no 'passwd' cmd available.""" - m_which.side_effect = lambda m: m in ('usermod',) - self.dist.lock_passwd("bob") - self.assertEqual( - [mock.call(['usermod', '--lock', 'bob'])], - m_subp.call_args_list) - - @mock.patch("cloudinit.distros.subp.which") - def test_lock_with_passwd_if_available(self, m_which, m_subp, - m_is_snappy): - """Lock with only passwd will use passwd.""" - m_which.side_effect = lambda m: m in ('passwd',) - self.dist.lock_passwd("bob") - self.assertEqual( - [mock.call(['passwd', '-l', 'bob'])], - m_subp.call_args_list) - - @mock.patch("cloudinit.distros.subp.which") - def test_lock_raises_runtime_if_no_commands(self, m_which, m_subp, - m_is_snappy): - """Lock with no commands available raises RuntimeError.""" - m_which.return_value = None - with self.assertRaises(RuntimeError): - self.dist.lock_passwd("bob") - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_debian.py b/tests/unittests/test_distros/test_debian.py deleted file mode 100644 index a88c2686..00000000 --- a/tests/unittests/test_distros/test_debian.py +++ /dev/null @@ -1,174 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
-from itertools import count, cycle -from unittest import mock - -import pytest - -from cloudinit import distros, util -from cloudinit.distros.debian import ( - APT_GET_COMMAND, - APT_GET_WRAPPER, -) -from cloudinit.tests.helpers import FilesystemMockingTestCase -from cloudinit import subp - - -@mock.patch("cloudinit.distros.debian.subp.subp") -class TestDebianApplyLocale(FilesystemMockingTestCase): - - def setUp(self): - super(TestDebianApplyLocale, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - self.patchUtils(self.new_root) - self.spath = self.tmp_path('etc/default/locale', self.new_root) - cls = distros.fetch("debian") - self.distro = cls("debian", {}, None) - - def test_no_rerun(self, m_subp): - """If system has defined locale, no re-run is expected.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=%s\n' % locale, omode="w") - self.distro.apply_locale(locale, out_fn=self.spath) - m_subp.assert_not_called() - - def test_no_regen_on_c_utf8(self, m_subp): - """If locale is set to C.UTF8, do not attempt to call locale-gen""" - m_subp.return_value = (None, None) - locale = 'C.UTF-8' - util.write_file(self.spath, 'LANG=%s\n' % 'en_US.UTF-8', omode="w") - self.distro.apply_locale(locale, out_fn=self.spath) - self.assertEqual( - [['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_rerun_if_different(self, m_subp): - """If system has different locale, locale-gen should be called.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=fr_FR.UTF-8', omode="w") - self.distro.apply_locale(locale, out_fn=self.spath) - self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_rerun_if_no_file(self, m_subp): - """If system has no locale file, locale-gen should be called.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - self.distro.apply_locale(locale, out_fn=self.spath) - self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_rerun_on_unset_system_locale(self, m_subp): - """If system has unset locale, locale-gen should be called.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=', omode="w") - self.distro.apply_locale(locale, out_fn=self.spath) - self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LANG=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_rerun_on_mismatched_keys(self, m_subp): - """If key is LC_ALL and system has only LANG, rerun is expected.""" - m_subp.return_value = (None, None) - locale = 'en_US.UTF-8' - util.write_file(self.spath, 'LANG=', omode="w") - self.distro.apply_locale(locale, out_fn=self.spath, keyname='LC_ALL') - self.assertEqual( - [['locale-gen', locale], - ['update-locale', '--locale-file=' + self.spath, - 'LC_ALL=%s' % locale]], - [p[0][0] for p in m_subp.call_args_list]) - - def test_falseish_locale_raises_valueerror(self, m_subp): - """locale as None or "" is invalid and should raise ValueError.""" - - with self.assertRaises(ValueError) as ctext_m: - self.distro.apply_locale(None) - m_subp.assert_not_called() - - self.assertEqual( - 'Failed to provide locale value.', 
str(ctext_m.exception)) - - with self.assertRaises(ValueError) as ctext_m: - self.distro.apply_locale("") - m_subp.assert_not_called() - self.assertEqual( - 'Failed to provide locale value.', str(ctext_m.exception)) - - -@mock.patch.dict('os.environ', {}, clear=True) -@mock.patch("cloudinit.distros.debian.subp.which", return_value=True) -@mock.patch("cloudinit.distros.debian.subp.subp") -class TestPackageCommand: - distro = distros.fetch("debian")("debian", {}, None) - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - return_value=True) - def test_simple_command(self, m_apt_avail, m_subp, m_which): - self.distro.package_command('update') - apt_args = [APT_GET_WRAPPER['command']] - apt_args.extend(APT_GET_COMMAND) - apt_args.append('update') - expected_call = { - 'args': apt_args, - 'capture': False, - 'env': {'DEBIAN_FRONTEND': 'noninteractive'}, - } - assert m_subp.call_args == mock.call(**expected_call) - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - side_effect=[False, False, True]) - @mock.patch("cloudinit.distros.debian.time.sleep") - def test_wait_for_lock(self, m_sleep, m_apt_avail, m_subp, m_which): - self.distro._wait_for_apt_command("stub", {"args": "stub2"}) - assert m_sleep.call_args_list == [mock.call(1), mock.call(1)] - assert m_subp.call_args_list == [mock.call(args='stub2')] - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - return_value=False) - @mock.patch("cloudinit.distros.debian.time.sleep") - @mock.patch("cloudinit.distros.debian.time.time", side_effect=count()) - def test_lock_wait_timeout( - self, m_time, m_sleep, m_apt_avail, m_subp, m_which - ): - with pytest.raises(TimeoutError): - self.distro._wait_for_apt_command("stub", "stub2", timeout=5) - assert m_subp.call_args_list == [] - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - side_effect=cycle([True, False])) - @mock.patch("cloudinit.distros.debian.time.sleep") - def test_lock_exception_wait(self, m_sleep, m_apt_avail, m_subp, m_which): - exception = subp.ProcessExecutionError( - exit_code=100, stderr="Could not get apt lock" - ) - m_subp.side_effect = [exception, exception, "return_thing"] - ret = self.distro._wait_for_apt_command("stub", {"args": "stub2"}) - assert ret == "return_thing" - - @mock.patch("cloudinit.distros.debian.Distro._apt_lock_available", - side_effect=cycle([True, False])) - @mock.patch("cloudinit.distros.debian.time.sleep") - @mock.patch("cloudinit.distros.debian.time.time", side_effect=count()) - def test_lock_exception_timeout( - self, m_time, m_sleep, m_apt_avail, m_subp, m_which - ): - m_subp.side_effect = subp.ProcessExecutionError( - exit_code=100, stderr="Could not get apt lock" - ) - with pytest.raises(TimeoutError): - self.distro._wait_for_apt_command( - "stub", {"args": "stub2"}, timeout=5 - ) diff --git a/tests/unittests/test_distros/test_dragonflybsd.py b/tests/unittests/test_distros/test_dragonflybsd.py deleted file mode 100644 index df2c00f4..00000000 --- a/tests/unittests/test_distros/test_dragonflybsd.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python3 - - -import cloudinit.util -from cloudinit.tests.helpers import mock - - -def test_find_dragonflybsd_part(): - assert cloudinit.util.find_dragonflybsd_part("/dev/vbd0s3") == "vbd0s3" - - -@mock.patch("cloudinit.util.is_DragonFlyBSD") -@mock.patch("cloudinit.subp.subp") -def test_parse_mount(mock_subp, m_is_DragonFlyBSD): - mount_out = """ -vbd0s3 on / (hammer2, local) -devfs on /dev (devfs, nosymfollow, local) -/dev/vbd0s0a on 
/boot (ufs, local) -procfs on /proc (procfs, local) -tmpfs on /var/run/shm (tmpfs, local) -""" - - mock_subp.return_value = (mount_out, "") - m_is_DragonFlyBSD.return_value = True - assert cloudinit.util.parse_mount("/") == ("vbd0s3", "hammer2", "/") diff --git a/tests/unittests/test_distros/test_freebsd.py b/tests/unittests/test_distros/test_freebsd.py deleted file mode 100644 index be565b04..00000000 --- a/tests/unittests/test_distros/test_freebsd.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.util import (find_freebsd_part, get_path_dev_freebsd) -from cloudinit.tests.helpers import (CiTestCase, mock) - -import os - - -class TestDeviceLookUp(CiTestCase): - - @mock.patch('cloudinit.subp.subp') - def test_find_freebsd_part_label(self, mock_subp): - glabel_out = ''' -gptid/fa52d426-c337-11e6-8911-00155d4c5e47 N/A da0p1 - label/rootfs N/A da0p2 - label/swap N/A da0p3 -''' - mock_subp.return_value = (glabel_out, "") - res = find_freebsd_part("/dev/label/rootfs") - self.assertEqual("da0p2", res) - - @mock.patch('cloudinit.subp.subp') - def test_find_freebsd_part_gpt(self, mock_subp): - glabel_out = ''' - gpt/bootfs N/A vtbd0p1 -gptid/3f4cbe26-75da-11e8-a8f2-002590ec6166 N/A vtbd0p1 - gpt/swapfs N/A vtbd0p2 - gpt/rootfs N/A vtbd0p3 - iso9660/cidata N/A vtbd2 -''' - mock_subp.return_value = (glabel_out, "") - res = find_freebsd_part("/dev/gpt/rootfs") - self.assertEqual("vtbd0p3", res) - - def test_get_path_dev_freebsd_label(self): - mnt_list = ''' -/dev/label/rootfs / ufs rw 1 1 -devfs /dev devfs rw,multilabel 0 0 -fdescfs /dev/fd fdescfs rw 0 0 -/dev/da1s1 /mnt/resource ufs rw 2 2 -''' - with mock.patch.object(os.path, 'exists', - return_value=True): - res = get_path_dev_freebsd('/etc', mnt_list) - self.assertIsNotNone(res) diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py deleted file mode 100644 index 336150bc..00000000 --- a/tests/unittests/test_distros/test_generic.py +++ /dev/null @@ -1,315 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit import distros -from cloudinit import util - -from cloudinit.tests import helpers - -import os -import pytest -import shutil -import tempfile -from unittest import mock - -unknown_arch_info = { - 'arches': ['default'], - 'failsafe': {'primary': 'http://fs-primary-default', - 'security': 'http://fs-security-default'} -} - -package_mirrors = [ - {'arches': ['i386', 'amd64'], - 'failsafe': {'primary': 'http://fs-primary-intel', - 'security': 'http://fs-security-intel'}, - 'search': { - 'primary': ['http://%(ec2_region)s.ec2/', - 'http://%(availability_zone)s.clouds/'], - 'security': ['http://security-mirror1-intel', - 'http://security-mirror2-intel']}}, - {'arches': ['armhf', 'armel'], - 'failsafe': {'primary': 'http://fs-primary-arm', - 'security': 'http://fs-security-arm'}}, - unknown_arch_info -] - -gpmi = distros._get_package_mirror_info -gapmi = distros._get_arch_package_mirror_info - - -class TestGenericDistro(helpers.FilesystemMockingTestCase): - - def setUp(self): - super(TestGenericDistro, self).setUp() - # Make a temp directory for tests to use.
- self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def _write_load_sudoers(self, _user, rules): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - os.makedirs(os.path.join(self.tmp, "etc")) - os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d')) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - d.write_sudo_rules("harlowja", rules) - contents = util.load_file(d.ci_sudoers_fn) - return contents - - def _count_in(self, lines_look_for, text_content): - found_amount = 0 - for e in lines_look_for: - for line in text_content.splitlines(): - line = line.strip() - if line == e: - found_amount += 1 - return found_amount - - def test_sudoers_ensure_rules(self): - rules = 'ALL=(ALL:ALL) ALL' - contents = self._write_load_sudoers('harlowja', rules) - expected = ['harlowja ALL=(ALL:ALL) ALL'] - self.assertEqual(len(expected), self._count_in(expected, contents)) - not_expected = [ - 'harlowja A', - 'harlowja L', - 'harlowja L', - ] - self.assertEqual(0, self._count_in(not_expected, contents)) - - def test_sudoers_ensure_rules_list(self): - rules = [ - 'ALL=(ALL:ALL) ALL', - 'B-ALL=(ALL:ALL) ALL', - 'C-ALL=(ALL:ALL) ALL', - ] - contents = self._write_load_sudoers('harlowja', rules) - expected = [ - 'harlowja ALL=(ALL:ALL) ALL', - 'harlowja B-ALL=(ALL:ALL) ALL', - 'harlowja C-ALL=(ALL:ALL) ALL', - ] - self.assertEqual(len(expected), self._count_in(expected, contents)) - not_expected = [ - 'harlowja A', - 'harlowja L', - 'harlowja L', - ] - self.assertEqual(0, self._count_in(not_expected, contents)) - - def test_sudoers_ensure_new(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - d.ensure_sudo_dir("/b") - contents = util.load_file("/etc/sudoers") - self.assertIn("includedir /b", contents) - self.assertTrue(os.path.isdir("/b")) - - def test_sudoers_ensure_append(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - util.write_file("/etc/sudoers", "josh, josh\n") - d.ensure_sudo_dir("/b") - contents = util.load_file("/etc/sudoers") - self.assertIn("includedir /b", contents) - self.assertTrue(os.path.isdir("/b")) - self.assertIn("josh", contents) - self.assertEqual(2, contents.count("josh")) - - def test_sudoers_ensure_only_one_includedir(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - for char in ['#', '@']: - util.write_file("/etc/sudoers", "{}includedir /b".format(char)) - d.ensure_sudo_dir("/b") - contents = util.load_file("/etc/sudoers") - self.assertIn("includedir /b", contents) - self.assertTrue(os.path.isdir("/b")) - self.assertEqual(1, contents.count("includedir /b")) - - def test_arch_package_mirror_info_unknown(self): - """for an unknown arch, we should get back that with arch 'default'.""" - arch_mirrors = gapmi(package_mirrors, arch="unknown") - self.assertEqual(unknown_arch_info, arch_mirrors) - - def test_arch_package_mirror_info_known(self): - arch_mirrors = gapmi(package_mirrors, arch="amd64") - self.assertEqual(package_mirrors[0], arch_mirrors) - - def test_systemd_in_use(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - os.makedirs('/run/systemd/system') - self.assertTrue(d.uses_systemd()) - - def test_systemd_not_in_use(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - 
self.assertFalse(d.uses_systemd()) - - def test_systemd_symlink(self): - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - os.makedirs('/run/systemd') - os.symlink('/', '/run/systemd/system') - self.assertFalse(d.uses_systemd()) - - @mock.patch('cloudinit.distros.debian.read_system_locale') - def test_get_locale_ubuntu(self, m_locale): - """Test ubuntu distro returns locale set to C.UTF-8""" - m_locale.return_value = 'C.UTF-8' - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - locale = d.get_locale() - self.assertEqual('C.UTF-8', locale) - - def test_get_locale_rhel(self): - """Test rhel distro returns NotImplementedError exception""" - cls = distros.fetch("rhel") - d = cls("rhel", {}, None) - with self.assertRaises(NotImplementedError): - d.get_locale() - - def test_expire_passwd_uses_chpasswd(self): - """Test ubuntu.expire_passwd uses the passwd command.""" - for d_name in ("ubuntu", "rhel"): - cls = distros.fetch(d_name) - d = cls(d_name, {}, None) - with mock.patch("cloudinit.subp.subp") as m_subp: - d.expire_passwd("myuser") - m_subp.assert_called_once_with(["passwd", "--expire", "myuser"]) - - def test_expire_passwd_freebsd_uses_pw_command(self): - """Test FreeBSD.expire_passwd uses the pw command.""" - cls = distros.fetch("freebsd") - d = cls("freebsd", {}, None) - with mock.patch("cloudinit.subp.subp") as m_subp: - d.expire_passwd("myuser") - m_subp.assert_called_once_with( - ["pw", "usermod", "myuser", "-p", "01-Jan-1970"]) - - -class TestGetPackageMirrors: - - def return_first(self, mlist): - if not mlist: - return None - return mlist[0] - - def return_second(self, mlist): - if not mlist: - return None - - return mlist[1] if len(mlist) > 1 else None - - def return_none(self, _mlist): - return None - - def return_last(self, mlist): - if not mlist: - return None - return(mlist[-1]) - - @pytest.mark.parametrize( - "allow_ec2_mirror, platform_type, mirrors", - [ - (True, "ec2", [ - {'primary': 'http://us-east-1.ec2/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror2-intel'} - ]), - (True, "other", [ - {'primary': 'http://us-east-1.ec2/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror2-intel'} - ]), - (False, "ec2", [ - {'primary': 'http://us-east-1.ec2/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror2-intel'} - ]), - (False, "other", [ - {'primary': 'http://us-east-1a.clouds/', - 'security': 'http://security-mirror1-intel'}, - {'primary': 'http://fs-primary-intel', - 'security': 'http://security-mirror2-intel'} - ]) - ]) - def test_get_package_mirror_info_az_ec2(self, - allow_ec2_mirror, - platform_type, - mirrors): - flag_path = "cloudinit.distros." 
\ - "ALLOW_EC2_MIRRORS_ON_NON_AWS_INSTANCE_TYPES" - with mock.patch(flag_path, allow_ec2_mirror): - arch_mirrors = gapmi(package_mirrors, arch="amd64") - data_source_mock = mock.Mock( - availability_zone="us-east-1a", - platform_type=platform_type) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_first) - assert(results == mirrors[0]) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_second) - assert(results == mirrors[1]) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_none) - assert(results == package_mirrors[0]['failsafe']) - - def test_get_package_mirror_info_az_non_ec2(self): - arch_mirrors = gapmi(package_mirrors, arch="amd64") - data_source_mock = mock.Mock(availability_zone="nova.cloudvendor") - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_first) - assert(results == { - 'primary': 'http://nova.cloudvendor.clouds/', - 'security': 'http://security-mirror1-intel'} - ) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_last) - assert(results == { - 'primary': 'http://nova.cloudvendor.clouds/', - 'security': 'http://security-mirror2-intel'} - ) - - def test_get_package_mirror_info_none(self): - arch_mirrors = gapmi(package_mirrors, arch="amd64") - data_source_mock = mock.Mock(availability_zone=None) - - # because both search entries here replacement based on - # availability-zone, the filter will be called with an empty list and - # failsafe should be taken. - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_first) - assert(results == { - 'primary': 'http://fs-primary-intel', - 'security': 'http://security-mirror1-intel'} - ) - - results = gpmi(arch_mirrors, data_source=data_source_mock, - mirror_filter=self.return_last) - assert(results == { - 'primary': 'http://fs-primary-intel', - 'security': 'http://security-mirror2-intel'} - ) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_gentoo.py b/tests/unittests/test_distros/test_gentoo.py deleted file mode 100644 index 37a4f51f..00000000 --- a/tests/unittests/test_distros/test_gentoo.py +++ /dev/null @@ -1,26 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit import util -from cloudinit import atomic_helper -from cloudinit.tests.helpers import CiTestCase -from . import _get_distro - - -class TestGentoo(CiTestCase): - - def test_write_hostname(self): - distro = _get_distro("gentoo") - hostname = "myhostname" - hostfile = self.tmp_path("hostfile") - distro._write_hostname(hostname, hostfile) - self.assertEqual('hostname="myhostname"\n', util.load_file(hostfile)) - - def test_write_existing_hostname_with_comments(self): - distro = _get_distro("gentoo") - hostname = "myhostname" - contents = '#This is the hostname\nhostname="localhost"' - hostfile = self.tmp_path("hostfile") - atomic_helper.write_file(hostfile, contents, omode="w") - distro._write_hostname(hostname, hostfile) - self.assertEqual('#This is the hostname\nhostname="myhostname"\n', - util.load_file(hostfile)) diff --git a/tests/unittests/test_distros/test_hostname.py b/tests/unittests/test_distros/test_hostname.py deleted file mode 100644 index f6d4dbe5..00000000 --- a/tests/unittests/test_distros/test_hostname.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import unittest - -from cloudinit.distros.parsers import hostname - - -BASE_HOSTNAME = ''' -# My super-duper-hostname - -blahblah - -''' -BASE_HOSTNAME = BASE_HOSTNAME.strip() - - -class TestHostnameHelper(unittest.TestCase): - def test_parse_same(self): - hn = hostname.HostnameConf(BASE_HOSTNAME) - self.assertEqual(str(hn).strip(), BASE_HOSTNAME) - self.assertEqual(hn.hostname, 'blahblah') - - def test_no_adjust_hostname(self): - hn = hostname.HostnameConf(BASE_HOSTNAME) - prev_name = hn.hostname - hn.set_hostname("") - self.assertEqual(hn.hostname, prev_name) - - def test_adjust_hostname(self): - hn = hostname.HostnameConf(BASE_HOSTNAME) - prev_name = hn.hostname - self.assertEqual(prev_name, 'blahblah') - hn.set_hostname("bbbbd") - self.assertEqual(hn.hostname, 'bbbbd') - expected_out = ''' -# My super-duper-hostname - -bbbbd -''' - self.assertEqual(str(hn).strip(), expected_out.strip()) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_hosts.py b/tests/unittests/test_distros/test_hosts.py deleted file mode 100644 index 8aaa6e48..00000000 --- a/tests/unittests/test_distros/test_hosts.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import unittest - -from cloudinit.distros.parsers import hosts - - -BASE_ETC = ''' -# Example -127.0.0.1 localhost -192.168.1.10 foo.mydomain.org foo -192.168.1.10 bar.mydomain.org bar -146.82.138.7 master.debian.org master -209.237.226.90 www.opensource.org -''' -BASE_ETC = BASE_ETC.strip() - - -class TestHostsHelper(unittest.TestCase): - def test_parse(self): - eh = hosts.HostsConf(BASE_ETC) - self.assertEqual(eh.get_entry('127.0.0.1'), [['localhost']]) - self.assertEqual(eh.get_entry('192.168.1.10'), - [['foo.mydomain.org', 'foo'], - ['bar.mydomain.org', 'bar']]) - eh = str(eh) - self.assertTrue(eh.startswith('# Example')) - - def test_add(self): - eh = hosts.HostsConf(BASE_ETC) - eh.add_entry('127.0.0.0', 'blah') - self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']]) - eh.add_entry('127.0.0.3', 'blah', 'blah2', 'blah3') - self.assertEqual(eh.get_entry('127.0.0.3'), - [['blah', 'blah2', 'blah3']]) - - def test_del(self): - eh = hosts.HostsConf(BASE_ETC) - eh.add_entry('127.0.0.0', 'blah') - self.assertEqual(eh.get_entry('127.0.0.0'), [['blah']]) - - eh.del_entries('127.0.0.0') - self.assertEqual(eh.get_entry('127.0.0.0'), []) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_manage_service.py b/tests/unittests/test_distros/test_manage_service.py deleted file mode 100644 index 47e7cfb0..00000000 --- a/tests/unittests/test_distros/test_manage_service.py +++ /dev/null @@ -1,38 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
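# The HostsConf assertions above expect one name-list per matching line,
# so a duplicated IP such as 192.168.1.10 yields two separate entries. A
# minimal sketch of that lookup shape, assuming a plain text model
# (illustrative only, not cloudinit.distros.parsers.hosts):
def get_entries(hosts_text, ip):
    entries = []
    for line in hosts_text.splitlines():
        line = line.split('#', 1)[0].strip()  # drop comments and padding
        if not line:
            continue
        parts = line.split()
        if parts[0] == ip:
            entries.append(parts[1:])  # every hostname/alias on the line
    return entries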
- -from cloudinit.tests.helpers import (CiTestCase, mock) -from tests.unittests.util import TestingDistro - - -class TestManageService(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestManageService, self).setUp() - self.dist = TestingDistro() - - @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False) - @mock.patch("cloudinit.distros.subp.subp") - def test_manage_service_systemctl_initcmd(self, m_subp, m_sysd): - self.dist.init_cmd = ['systemctl'] - self.dist.manage_service('start', 'myssh') - m_subp.assert_called_with(['systemctl', 'start', 'myssh'], - capture=True) - - @mock.patch.object(TestingDistro, 'uses_systemd', return_value=False) - @mock.patch("cloudinit.distros.subp.subp") - def test_manage_service_service_initcmd(self, m_subp, m_sysd): - self.dist.init_cmd = ['service'] - self.dist.manage_service('start', 'myssh') - m_subp.assert_called_with(['service', 'myssh', 'start'], capture=True) - - @mock.patch.object(TestingDistro, 'uses_systemd', return_value=True) - @mock.patch("cloudinit.distros.subp.subp") - def test_manage_service_systemctl(self, m_subp, m_sysd): - self.dist.init_cmd = ['ignore'] - self.dist.manage_service('start', 'myssh') - m_subp.assert_called_with(['systemctl', 'start', 'myssh'], - capture=True) - -# vi: ts=4 sw=4 expandtab diff --git a/tests/unittests/test_distros/test_netbsd.py b/tests/unittests/test_distros/test_netbsd.py deleted file mode 100644 index 11a68d2a..00000000 --- a/tests/unittests/test_distros/test_netbsd.py +++ /dev/null @@ -1,17 +0,0 @@ -import cloudinit.distros.netbsd - -import pytest -import unittest.mock as mock - - -@pytest.mark.parametrize('with_pkgin', (True, False)) -@mock.patch("cloudinit.distros.netbsd.os") -def test_init(m_os, with_pkgin): - print(with_pkgin) - m_os.path.exists.return_value = with_pkgin - cfg = {} - - distro = cloudinit.distros.netbsd.NetBSD("netbsd", cfg, None) - expectation = ['pkgin', '-y', 'full-upgrade'] if with_pkgin else None - assert distro.pkg_cmd_upgrade_prefix == expectation - assert [mock.call('/usr/pkg/bin/pkgin')] == m_os.path.exists.call_args_list diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py deleted file mode 100644 index e4eba179..00000000 --- a/tests/unittests/test_distros/test_netconfig.py +++ /dev/null @@ -1,916 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
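# TestManageService above pins the argument order per init system:
# systemctl places the action before the unit, while the SysV 'service'
# wrapper takes the service name first. A sketch of that dispatch under
# the same assumptions the tests encode (build_service_cmd is an
# illustrative helper, not the distro API):
def build_service_cmd(init_cmd, action, service, uses_systemd):
    if uses_systemd:
        return ['systemctl', action, service]
    if init_cmd == ['service']:
        return ['service', service, action]
    return list(init_cmd) + [action, service]

# build_service_cmd(['systemctl'], 'start', 'myssh', False)
# -> ['systemctl', 'start', 'myssh']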
- -import copy -import os -import re -from io import StringIO -from textwrap import dedent -from unittest import mock - -from cloudinit import distros -from cloudinit.distros.parsers.sys_conf import SysConf -from cloudinit import helpers -from cloudinit import settings -from cloudinit.tests.helpers import ( - FilesystemMockingTestCase, dir2dict) -from cloudinit import subp -from cloudinit import util -from cloudinit import safeyaml - -BASE_NET_CFG = ''' -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 192.168.1.5 - broadcast 192.168.1.0 - gateway 192.168.1.254 - netmask 255.255.255.0 - network 192.168.0.0 - -auto eth1 -iface eth1 inet dhcp -''' - -BASE_NET_CFG_FROM_V2 = ''' -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 192.168.1.5/24 - gateway 192.168.1.254 - -auto eth1 -iface eth1 inet dhcp -''' - -BASE_NET_CFG_IPV6 = ''' -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 192.168.1.5 - netmask 255.255.255.0 - network 192.168.0.0 - broadcast 192.168.1.0 - gateway 192.168.1.254 - -iface eth0 inet6 static - address 2607:f0d0:1002:0011::2 - netmask 64 - gateway 2607:f0d0:1002:0011::1 - -iface eth1 inet static - address 192.168.1.6 - netmask 255.255.255.0 - network 192.168.0.0 - broadcast 192.168.1.0 - gateway 192.168.1.254 - -iface eth1 inet6 static - address 2607:f0d0:1002:0011::3 - netmask 64 - gateway 2607:f0d0:1002:0011::1 -''' - -V1_NET_CFG = {'config': [{'name': 'eth0', - - 'subnets': [{'address': '192.168.1.5', - 'broadcast': '192.168.1.0', - 'gateway': '192.168.1.254', - 'netmask': '255.255.255.0', - 'type': 'static'}], - 'type': 'physical'}, - {'name': 'eth1', - 'subnets': [{'control': 'auto', 'type': 'dhcp4'}], - 'type': 'physical'}], - 'version': 1} - -V1_NET_CFG_WITH_DUPS = """\ -# same value in interface specific dns and global dns -# should produce single entry in network file -version: 1 -config: - - type: physical - name: eth0 - subnets: - - type: static - address: 192.168.0.102/24 - dns_nameservers: [1.2.3.4] - dns_search: [test.com] - interface: eth0 - - type: nameserver - address: [1.2.3.4] - search: [test.com] -""" - -V1_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet static - address 192.168.1.5/24 - broadcast 192.168.1.0 - gateway 192.168.1.254 - -auto eth1 -iface eth1 inet dhcp -""" - -V1_NET_CFG_IPV6_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. 
To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -auto lo -iface lo inet loopback - -auto eth0 -iface eth0 inet6 static - address 2607:f0d0:1002:0011::2/64 - gateway 2607:f0d0:1002:0011::1 - -auto eth1 -iface eth1 inet dhcp -""" - -V1_NET_CFG_IPV6 = {'config': [{'name': 'eth0', - 'subnets': [{'address': - '2607:f0d0:1002:0011::2', - 'gateway': - '2607:f0d0:1002:0011::1', - 'netmask': '64', - 'type': 'static6'}], - 'type': 'physical'}, - {'name': 'eth1', - 'subnets': [{'control': 'auto', - 'type': 'dhcp4'}], - 'type': 'physical'}], - 'version': 1} - - -V1_TO_V2_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -network: - version: 2 - ethernets: - eth0: - addresses: - - 192.168.1.5/24 - gateway4: 192.168.1.254 - eth1: - dhcp4: true -""" - -V1_TO_V2_NET_CFG_IPV6_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -network: - version: 2 - ethernets: - eth0: - addresses: - - 2607:f0d0:1002:0011::2/64 - gateway6: 2607:f0d0:1002:0011::1 - eth1: - dhcp4: true -""" - -V2_NET_CFG = { - 'ethernets': { - 'eth7': { - 'addresses': ['192.168.1.5/24'], - 'gateway4': '192.168.1.254'}, - 'eth9': { - 'dhcp4': True} - }, - 'version': 2 -} - - -V2_TO_V2_NET_CFG_OUTPUT = """\ -# This file is generated from information provided by the datasource. Changes -# to it will not persist across an instance reboot. 
To disable cloud-init's -# network configuration capabilities, write a file -# /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg with the following: -# network: {config: disabled} -network: - ethernets: - eth7: - addresses: - - 192.168.1.5/24 - gateway4: 192.168.1.254 - eth9: - dhcp4: true - version: 2 -""" - - -class WriteBuffer(object): - def __init__(self): - self.buffer = StringIO() - self.mode = None - self.omode = None - - def write(self, text): - self.buffer.write(text) - - def __str__(self): - return self.buffer.getvalue() - - -class TestNetCfgDistroBase(FilesystemMockingTestCase): - - def setUp(self): - super(TestNetCfgDistroBase, self).setUp() - self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') - - def _get_distro(self, dname, renderers=None): - cls = distros.fetch(dname) - cfg = settings.CFG_BUILTIN - cfg['system_info']['distro'] = dname - if renderers: - cfg['system_info']['network'] = {'renderers': renderers} - paths = helpers.Paths({}) - return cls(dname, cfg.get('system_info'), paths) - - def assertCfgEquals(self, blob1, blob2): - b1 = dict(SysConf(blob1.strip().splitlines())) - b2 = dict(SysConf(blob2.strip().splitlines())) - self.assertEqual(b1, b2) - for (k, v) in b1.items(): - self.assertIn(k, b2) - for (k, v) in b2.items(): - self.assertIn(k, b1) - for (k, v) in b1.items(): - self.assertEqual(v, b2[k]) - - -class TestNetCfgDistroFreeBSD(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroFreeBSD, self).setUp() - self.distro = self._get_distro('freebsd', renderers=['freebsd']) - - def _apply_and_verify_freebsd(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.freebsd.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - util.ensure_dir('/etc') - util.ensure_file('/etc/rc.conf') - util.ensure_file('/etc/resolv.conf') - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - print("----------") - print(expected) - print("^^^^ expected | rendered VVVVVVV") - print(results[cfgpath]) - print("----------") - self.assertEqual( - set(expected.split('\n')), - set(results[cfgpath].split('\n'))) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_apply_network_config_freebsd_standard(self, ifaces_mac): - ifaces_mac.return_value = { - '00:15:5d:4c:73:00': 'eth0', - } - rc_conf_expected = """\ -defaultrouter=192.168.1.254 -ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' -ifconfig_eth1=DHCP -""" - - expected_cfgs = { - '/etc/rc.conf': rc_conf_expected, - '/etc/resolv.conf': '' - } - self._apply_and_verify_freebsd(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_apply_network_config_freebsd_ifrename(self, ifaces_mac): - ifaces_mac.return_value = { - '00:15:5d:4c:73:00': 'vtnet0', - } - rc_conf_expected = """\ -ifconfig_vtnet0_name=eth0 -defaultrouter=192.168.1.254 -ifconfig_eth0='192.168.1.5 netmask 255.255.255.0' -ifconfig_eth1=DHCP -""" - - V1_NET_CFG_RENAME = copy.deepcopy(V1_NET_CFG) - V1_NET_CFG_RENAME['config'][0]['mac_address'] = '00:15:5d:4c:73:00' - - expected_cfgs = { - '/etc/rc.conf': rc_conf_expected, - '/etc/resolv.conf': '' - } - self._apply_and_verify_freebsd(self.distro.apply_network_config, - V1_NET_CFG_RENAME, - expected_cfgs=expected_cfgs.copy()) - 
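# The two FreeBSD cases above hinge on one rename rule: when the kernel
# name reported for a MAC differs from the configured name, an
# ifconfig_<kernel>_name= line must precede the address lines in
# rc.conf. A rough sketch of just that step (hypothetical helper, not
# the cloudinit.net.freebsd renderer):
def rc_conf_rename_lines(names_by_mac, mac, wanted_name):
    kernel_name = names_by_mac[mac.lower()]
    if kernel_name == wanted_name:
        return []  # common case: nothing to rename
    return ['ifconfig_%s_name=%s' % (kernel_name, wanted_name)]

# rc_conf_rename_lines({'00:15:5d:4c:73:00': 'vtnet0'},
#                      '00:15:5d:4c:73:00', 'eth0')
# -> ['ifconfig_vtnet0_name=eth0']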
- @mock.patch('cloudinit.net.get_interfaces_by_mac') - def test_apply_network_config_freebsd_nameserver(self, ifaces_mac): - ifaces_mac.return_value = { - '00:15:5d:4c:73:00': 'eth0', - } - - V1_NET_CFG_DNS = copy.deepcopy(V1_NET_CFG) - ns = ['1.2.3.4'] - V1_NET_CFG_DNS['config'][0]['subnets'][0]['dns_nameservers'] = ns - expected_cfgs = { - '/etc/resolv.conf': 'nameserver 1.2.3.4\n' - } - self._apply_and_verify_freebsd(self.distro.apply_network_config, - V1_NET_CFG_DNS, - expected_cfgs=expected_cfgs.copy()) - - -class TestNetCfgDistroUbuntuEni(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroUbuntuEni, self).setUp() - self.distro = self._get_distro('ubuntu', renderers=['eni']) - - def eni_path(self): - return '/etc/network/interfaces.d/50-cloud-init.cfg' - - def _apply_and_verify_eni(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.eni.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - print("----------") - print(expected) - print("^^^^ expected | rendered VVVVVVV") - print(results[cfgpath]) - print("----------") - self.assertEqual(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def test_apply_network_config_eni_ub(self): - expected_cfgs = { - self.eni_path(): V1_NET_CFG_OUTPUT, - } - # ub_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify_eni(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_ipv6_ub(self): - expected_cfgs = { - self.eni_path(): V1_NET_CFG_IPV6_OUTPUT - } - self._apply_and_verify_eni(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - - -class TestNetCfgDistroUbuntuNetplan(TestNetCfgDistroBase): - def setUp(self): - super(TestNetCfgDistroUbuntuNetplan, self).setUp() - self.distro = self._get_distro('ubuntu', renderers=['netplan']) - self.devlist = ['eth0', 'lo'] - - def _apply_and_verify_netplan(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.netplan.available', - return_value=True): - with mock.patch("cloudinit.net.netplan.get_devicelist", - return_value=self.devlist): - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - print("----------") - print(expected) - print("^^^^ expected | rendered VVVVVVV") - print(results[cfgpath]) - print("----------") - self.assertEqual(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' - - def test_apply_network_config_v1_to_netplan_ub(self): - expected_cfgs = { - self.netplan_path(): V1_TO_V2_NET_CFG_OUTPUT, - } - - # ub_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_v1_ipv6_to_netplan_ub(self): - expected_cfgs = { - self.netplan_path(): V1_TO_V2_NET_CFG_IPV6_OUTPUT, - } - - # ub_distro.apply_network_config(V1_NET_CFG_IPV6, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - 
V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_v2_passthrough_ub(self): - expected_cfgs = { - self.netplan_path(): V2_TO_V2_NET_CFG_OUTPUT, - } - # ub_distro.apply_network_config(V2_NET_CFG, False) - self._apply_and_verify_netplan(self.distro.apply_network_config, - V2_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - -class TestNetCfgDistroRedhat(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroRedhat, self).setUp() - self.distro = self._get_distro('rhel', renderers=['sysconfig']) - - def ifcfg_path(self, ifname): - return '/etc/sysconfig/network-scripts/ifcfg-%s' % ifname - - def control_path(self): - return '/etc/sysconfig/network' - - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.sysconfig.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - self.assertCfgEquals(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def test_apply_network_config_rh(self): - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEFROUTE=yes - DEVICE=eth0 - GATEWAY=192.168.1.254 - IPADDR=192.168.1.5 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp - DEVICE=eth1 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.control_path(): dedent("""\ - NETWORKING=yes - """), - } - # rh_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_ipv6_rh(self): - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEFROUTE=yes - DEVICE=eth0 - IPV6ADDR=2607:f0d0:1002:0011::2/64 - IPV6INIT=yes - IPV6_AUTOCONF=no - IPV6_DEFAULTGW=2607:f0d0:1002:0011::1 - IPV6_FORCE_ACCEPT_RA=no - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp - DEVICE=eth1 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.control_path(): dedent("""\ - NETWORKING=yes - NETWORKING_IPV6=yes - IPV6_AUTOCONF=no - """), - } - # rh_distro.apply_network_config(V1_NET_CFG_IPV6, False) - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - - def test_vlan_render_unsupported(self): - """Render officially unsupported vlan names.""" - cfg = { - 'version': 2, - 'ethernets': { - 'eth0': {'addresses': ["192.10.1.2/24"], - 'match': {'macaddress': "00:16:3e:60:7c:df"}}}, - 'vlans': { - 'infra0': {'addresses': ["10.0.1.2/16"], - 'id': 1001, 'link': 'eth0'}}, - } - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEVICE=eth0 - HWADDR=00:16:3e:60:7c:df - IPADDR=192.10.1.2 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('infra0'): dedent("""\ - BOOTPROTO=none - DEVICE=infra0 - IPADDR=10.0.1.2 - NETMASK=255.255.0.0 - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=eth0 - USERCTL=no - VLAN=yes - """), - self.control_path(): dedent("""\ - NETWORKING=yes - """), - } - self._apply_and_verify( - self.distro.apply_network_config, cfg, - 
expected_cfgs=expected_cfgs) - - def test_vlan_render(self): - cfg = { - 'version': 2, - 'ethernets': { - 'eth0': {'addresses': ["192.10.1.2/24"]}}, - 'vlans': { - 'eth0.1001': {'addresses': ["10.0.1.2/16"], - 'id': 1001, 'link': 'eth0'}}, - } - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=none - DEVICE=eth0 - IPADDR=192.10.1.2 - NETMASK=255.255.255.0 - NM_CONTROLLED=no - ONBOOT=yes - TYPE=Ethernet - USERCTL=no - """), - self.ifcfg_path('eth0.1001'): dedent("""\ - BOOTPROTO=none - DEVICE=eth0.1001 - IPADDR=10.0.1.2 - NETMASK=255.255.0.0 - NM_CONTROLLED=no - ONBOOT=yes - PHYSDEV=eth0 - USERCTL=no - VLAN=yes - """), - self.control_path(): dedent("""\ - NETWORKING=yes - """), - } - self._apply_and_verify( - self.distro.apply_network_config, cfg, - expected_cfgs=expected_cfgs) - - -class TestNetCfgDistroOpensuse(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroOpensuse, self).setUp() - self.distro = self._get_distro('opensuse', renderers=['sysconfig']) - - def ifcfg_path(self, ifname): - return '/etc/sysconfig/network/ifcfg-%s' % ifname - - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.sysconfig.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - self.assertCfgEquals(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def test_apply_network_config_opensuse(self): - """Opensuse uses apply_network_config and renders sysconfig""" - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=static - IPADDR=192.168.1.5 - NETMASK=255.255.255.0 - STARTMODE=auto - """), - self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp4 - STARTMODE=auto - """), - } - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy()) - - def test_apply_network_config_ipv6_opensuse(self): - """Opensuse uses apply_network_config and renders sysconfig w/ipv6""" - expected_cfgs = { - self.ifcfg_path('eth0'): dedent("""\ - BOOTPROTO=static - IPADDR6=2607:f0d0:1002:0011::2/64 - STARTMODE=auto - """), - self.ifcfg_path('eth1'): dedent("""\ - BOOTPROTO=dhcp4 - STARTMODE=auto - """), - } - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG_IPV6, - expected_cfgs=expected_cfgs.copy()) - - -class TestNetCfgDistroArch(TestNetCfgDistroBase): - def setUp(self): - super(TestNetCfgDistroArch, self).setUp() - self.distro = self._get_distro('arch', renderers=['netplan']) - - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False, with_netplan=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.netplan.available', - return_value=with_netplan): - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - print("----------") - print(expected) - print("^^^^ expected | rendered VVVVVVV") - print(results[cfgpath]) - print("----------") - self.assertEqual(expected, results[cfgpath]) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def netctl_path(self, iface): - return '/etc/netctl/%s' % iface - - def netplan_path(self): - return '/etc/netplan/50-cloud-init.yaml' - - def 
test_apply_network_config_v1_without_netplan(self): - # Note that this is in fact an invalid netctl config: - # "Address=None/None" - # But this is what the renderer has been writing out for a long time, - # and the test's purpose is to assert that the netctl renderer is - # still being used in absence of netplan, not the correctness of the - # rendered netctl config. - expected_cfgs = { - self.netctl_path('eth0'): dedent("""\ - Address=192.168.1.5/255.255.255.0 - Connection=ethernet - DNS=() - Gateway=192.168.1.254 - IP=static - Interface=eth0 - """), - self.netctl_path('eth1'): dedent("""\ - Address=None/None - Connection=ethernet - DNS=() - Gateway= - IP=dhcp - Interface=eth1 - """), - } - - # ub_distro.apply_network_config(V1_NET_CFG, False) - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy(), - with_netplan=False) - - def test_apply_network_config_v1_with_netplan(self): - expected_cfgs = { - self.netplan_path(): dedent("""\ - # generated by cloud-init - network: - version: 2 - ethernets: - eth0: - addresses: - - 192.168.1.5/24 - gateway4: 192.168.1.254 - eth1: - dhcp4: true - """), - } - - with mock.patch( - 'cloudinit.net.netplan.get_devicelist', - return_value=[] - ): - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs=expected_cfgs.copy(), - with_netplan=True) - - -class TestNetCfgDistroPhoton(TestNetCfgDistroBase): - - def setUp(self): - super(TestNetCfgDistroPhoton, self).setUp() - self.distro = self._get_distro('photon', renderers=['networkd']) - - def create_conf_dict(self, contents): - content_dict = {} - for line in contents: - if line: - line = line.strip() - if line and re.search(r'^\[(.+)\]$', line): - content_dict[line] = [] - key = line - elif line: - assert key - content_dict[key].append(line) - - return content_dict - - def compare_dicts(self, actual, expected): - for k, v in actual.items(): - self.assertEqual(sorted(expected[k]), sorted(v)) - - def _apply_and_verify(self, apply_fn, config, expected_cfgs=None, - bringup=False): - if not expected_cfgs: - raise ValueError('expected_cfg must not be None') - - tmpd = None - with mock.patch('cloudinit.net.networkd.available') as m_avail: - m_avail.return_value = True - with self.reRooted(tmpd) as tmpd: - apply_fn(config, bringup) - - results = dir2dict(tmpd) - for cfgpath, expected in expected_cfgs.items(): - actual = self.create_conf_dict(results[cfgpath].splitlines()) - self.compare_dicts(actual, expected) - self.assertEqual(0o644, get_mode(cfgpath, tmpd)) - - def nwk_file_path(self, ifname): - return '/etc/systemd/network/10-cloud-init-%s.network' % ifname - - def net_cfg_1(self, ifname): - ret = """\ - [Match] - Name=%s - [Network] - DHCP=no - [Address] - Address=192.168.1.5/24 - [Route] - Gateway=192.168.1.254""" % ifname - return ret - - def net_cfg_2(self, ifname): - ret = """\ - [Match] - Name=%s - [Network] - DHCP=ipv4""" % ifname - return ret - - def test_photon_network_config_v1(self): - tmp = self.net_cfg_1('eth0').splitlines() - expected_eth0 = self.create_conf_dict(tmp) - - tmp = self.net_cfg_2('eth1').splitlines() - expected_eth1 = self.create_conf_dict(tmp) - - expected_cfgs = { - self.nwk_file_path('eth0'): expected_eth0, - self.nwk_file_path('eth1'): expected_eth1, - } - - self._apply_and_verify(self.distro.apply_network_config, - V1_NET_CFG, - expected_cfgs.copy()) - - def test_photon_network_config_v2(self): - tmp = self.net_cfg_1('eth7').splitlines() - expected_eth7 = self.create_conf_dict(tmp) - - tmp = 
self.net_cfg_2('eth9').splitlines() - expected_eth9 = self.create_conf_dict(tmp) - - expected_cfgs = { - self.nwk_file_path('eth7'): expected_eth7, - self.nwk_file_path('eth9'): expected_eth9, - } - - self._apply_and_verify(self.distro.apply_network_config, - V2_NET_CFG, - expected_cfgs.copy()) - - def test_photon_network_config_v1_with_duplicates(self): - expected = """\ - [Match] - Name=eth0 - [Network] - DHCP=no - DNS=1.2.3.4 - Domains=test.com - [Address] - Address=192.168.0.102/24""" - - net_cfg = safeyaml.load(V1_NET_CFG_WITH_DUPS) - - expected = self.create_conf_dict(expected.splitlines()) - expected_cfgs = { - self.nwk_file_path('eth0'): expected, - } - - self._apply_and_verify(self.distro.apply_network_config, - net_cfg, - expected_cfgs.copy()) - - -def get_mode(path, target=None): - return os.stat(subp.target_path(target, path)).st_mode & 0o777 - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_opensuse.py b/tests/unittests/test_distros/test_opensuse.py deleted file mode 100644 index b9bb9b3e..00000000 --- a/tests/unittests/test_distros/test_opensuse.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.tests.helpers import CiTestCase - -from . import _get_distro - - -class TestopenSUSE(CiTestCase): - - def test_get_distro(self): - distro = _get_distro("opensuse") - self.assertEqual(distro.osfamily, 'suse') diff --git a/tests/unittests/test_distros/test_photon.py b/tests/unittests/test_distros/test_photon.py deleted file mode 100644 index 1c3145ca..00000000 --- a/tests/unittests/test_distros/test_photon.py +++ /dev/null @@ -1,68 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from . import _get_distro -from cloudinit import util -from cloudinit.tests.helpers import mock -from cloudinit.tests.helpers import CiTestCase - -SYSTEM_INFO = { - 'paths': { - 'cloud_dir': '/var/lib/cloud/', - 'templates_dir': '/etc/cloud/templates/', - }, - 'network': {'renderers': 'networkd'}, -} - - -class TestPhoton(CiTestCase): - with_logs = True - distro = _get_distro('photon', SYSTEM_INFO) - expected_log_line = 'Rely on PhotonOS default network config' - - def test_network_renderer(self): - self.assertEqual(self.distro._cfg['network']['renderers'], 'networkd') - - def test_get_distro(self): - self.assertEqual(self.distro.osfamily, 'photon') - - @mock.patch("cloudinit.distros.photon.subp.subp") - def test_write_hostname(self, m_subp): - hostname = 'myhostname' - hostfile = self.tmp_path('previous-hostname') - self.distro._write_hostname(hostname, hostfile) - self.assertEqual(hostname, util.load_file(hostfile)) - - ret = self.distro._read_hostname(hostfile) - self.assertEqual(ret, hostname) - - m_subp.return_value = (None, None) - hostfile += 'hostfile' - self.distro._write_hostname(hostname, hostfile) - - m_subp.return_value = (hostname, None) - ret = self.distro._read_hostname(hostfile) - self.assertEqual(ret, hostname) - - self.logs.truncate(0) - m_subp.return_value = (None, 'bla') - self.distro._write_hostname(hostname, None) - self.assertIn('Error while setting hostname', self.logs.getvalue()) - - @mock.patch('cloudinit.net.generate_fallback_config') - def test_fallback_netcfg(self, m_fallback_cfg): - - key = 'disable_fallback_netcfg' - # Don't use fallback if no setting given - self.logs.truncate(0) - assert(self.distro.generate_fallback_config() is None) - self.assertIn(self.expected_log_line, self.logs.getvalue()) - - self.logs.truncate(0) - 
self.distro._cfg[key] = True - assert(self.distro.generate_fallback_config() is None) - self.assertIn(self.expected_log_line, self.logs.getvalue()) - - self.logs.truncate(0) - self.distro._cfg[key] = False - assert(self.distro.generate_fallback_config() is not None) - self.assertNotIn(self.expected_log_line, self.logs.getvalue()) diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py deleted file mode 100644 index 7d940750..00000000 --- a/tests/unittests/test_distros/test_resolv.py +++ /dev/null @@ -1,65 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.distros.parsers import resolv_conf - -from cloudinit.tests.helpers import TestCase - -import re - - -BASE_RESOLVE = ''' -; generated by /sbin/dhclient-script -search blah.yahoo.com yahoo.com -nameserver 10.15.44.14 -nameserver 10.15.30.92 -''' -BASE_RESOLVE = BASE_RESOLVE.strip() - - -class TestResolvHelper(TestCase): - def test_parse_same(self): - rp = resolv_conf.ResolvConf(BASE_RESOLVE) - rp_r = str(rp).strip() - self.assertEqual(BASE_RESOLVE, rp_r) - - def test_local_domain(self): - rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertIsNone(rp.local_domain) - - rp.local_domain = "bob" - self.assertEqual('bob', rp.local_domain) - self.assertIn('domain bob', str(rp)) - - def test_nameservers(self): - rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertIn('10.15.44.14', rp.nameservers) - self.assertIn('10.15.30.92', rp.nameservers) - rp.add_nameserver('10.2') - self.assertIn('10.2', rp.nameservers) - self.assertIn('nameserver 10.2', str(rp)) - self.assertNotIn('10.3', rp.nameservers) - self.assertEqual(len(rp.nameservers), 3) - rp.add_nameserver('10.2') - rp.add_nameserver('10.3') - self.assertNotIn('10.3', rp.nameservers) - - def test_search_domains(self): - rp = resolv_conf.ResolvConf(BASE_RESOLVE) - self.assertIn('yahoo.com', rp.search_domains) - self.assertIn('blah.yahoo.com', rp.search_domains) - rp.add_search_domain('bbb.y.com') - self.assertIn('bbb.y.com', rp.search_domains) - self.assertTrue(re.search(r'search(.*)bbb.y.com(.*)', str(rp))) - self.assertIn('bbb.y.com', rp.search_domains) - rp.add_search_domain('bbb.y.com') - self.assertEqual(len(rp.search_domains), 3) - rp.add_search_domain('bbb2.y.com') - self.assertEqual(len(rp.search_domains), 4) - rp.add_search_domain('bbb3.y.com') - self.assertEqual(len(rp.search_domains), 5) - rp.add_search_domain('bbb4.y.com') - self.assertEqual(len(rp.search_domains), 6) - self.assertRaises(ValueError, rp.add_search_domain, 'bbb5.y.com') - self.assertEqual(len(rp.search_domains), 6) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_sles.py b/tests/unittests/test_distros/test_sles.py deleted file mode 100644 index 33e3c457..00000000 --- a/tests/unittests/test_distros/test_sles.py +++ /dev/null @@ -1,12 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.tests.helpers import CiTestCase - -from . import _get_distro - - -class TestSLES(CiTestCase): - - def test_get_distro(self): - distro = _get_distro("sles") - self.assertEqual(distro.osfamily, 'suse') diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/test_distros/test_sysconfig.py deleted file mode 100644 index c1d5b693..00000000 --- a/tests/unittests/test_distros/test_sysconfig.py +++ /dev/null @@ -1,86 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
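# TestResolvHelper above encodes the classic resolver limits: additions
# beyond three nameservers are ignored, while a seventh search domain
# raises ValueError. A guarded-add sketch under those assumed caps
# (three nameservers and six search domains mirror the traditional
# glibc resolver defaults):
MAX_NAMESERVERS = 3
MAX_SEARCH_DOMAINS = 6

def add_nameserver(nameservers, ns):
    # Duplicates and over-cap additions are silently dropped, matching
    # the '10.3' assertions in test_nameservers.
    if ns not in nameservers and len(nameservers) < MAX_NAMESERVERS:
        nameservers.append(ns)

def add_search_domain(domains, domain):
    if domain in domains:
        return
    if len(domains) >= MAX_SEARCH_DOMAINS:
        raise ValueError("too many search domains: %s" % domain)
    domains.append(domain)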
- -import re - -from cloudinit.distros.parsers.sys_conf import SysConf - -from cloudinit.tests.helpers import TestCase - - -# Lots of good examples @ -# http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt - -class TestSysConfHelper(TestCase): - # This function was added in 2.7, make it work for 2.6 - def assertRegMatches(self, text, regexp): - regexp = re.compile(regexp) - self.assertTrue(regexp.search(text), - msg="%s must match %s!" % (text, regexp.pattern)) - - def test_parse_no_change(self): - contents = '''# A comment -USESMBAUTH=no -KEYTABLE=/usr/lib/kbd/keytables/us.map -SHORTDATE=$(date +%y:%m:%d:%H:%M) -HOSTNAME=blahblah -NETMASK0=255.255.255.0 -# Inline comment -LIST=$LOGROOT/incremental-list -IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64' -ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256" -USEMD5=no''' - conf = SysConf(contents.splitlines()) - self.assertEqual(conf['HOSTNAME'], 'blahblah') - self.assertEqual(conf['SHORTDATE'], '$(date +%y:%m:%d:%H:%M)') - # Should be unquoted - self.assertEqual(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; ' - '-G ${DEVICE} rx 256 tx 256')) - self.assertEqual(contents, str(conf)) - - def test_parse_shell_vars(self): - contents = 'USESMBAUTH=$XYZ' - conf = SysConf(contents.splitlines()) - self.assertEqual(contents, str(conf)) - conf = SysConf('') - conf['B'] = '${ZZ}d apples' - # Should be quoted - self.assertEqual('B="${ZZ}d apples"', str(conf)) - conf = SysConf('') - conf['B'] = '$? d apples' - self.assertEqual('B="$? d apples"', str(conf)) - contents = 'IPMI_WATCHDOG_OPTIONS="timeout=60"' - conf = SysConf(contents.splitlines()) - self.assertEqual('IPMI_WATCHDOG_OPTIONS=timeout=60', str(conf)) - - def test_parse_adjust(self): - contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"' - conf = SysConf(contents.splitlines()) - # Should be unquoted - self.assertEqual('eth0-:0004::1/64 eth1-:0005::1/64', - conf['IPV6TO4_ROUTING']) - conf['IPV6TO4_ROUTING'] = "blah \tblah" - contents2 = str(conf).strip() - # Should be requoted due to whitespace - self.assertRegMatches(contents2, - r'IPV6TO4_ROUTING=[\']blah\s+blah[\']') - - def test_parse_no_adjust_shell(self): - conf = SysConf(''.splitlines()) - conf['B'] = ' $(time)' - contents = str(conf) - self.assertEqual('B= $(time)', contents) - - def test_parse_empty(self): - contents = '' - conf = SysConf(contents.splitlines()) - self.assertEqual('', str(conf).strip()) - - def test_parse_add_new(self): - contents = 'BLAH=b' - conf = SysConf(contents.splitlines()) - conf['Z'] = 'd' - contents = str(conf) - self.assertIn("Z=d", contents) - self.assertIn("BLAH=b", contents) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py deleted file mode 100644 index 50c86942..00000000 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ /dev/null @@ -1,372 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
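# TestSysConfHelper above fixes the quoting contract for sysconfig
# values: command substitutions pass through bare, whitespace forces
# quoting, and double quotes are chosen when the value contains '$' so
# shell expansion still works. A simplified sketch consistent with those
# assertions (not the SysConf implementation itself):
def format_sysconfig_value(value):
    if value.lstrip().startswith('$('):
        return value  # leave shell command substitutions untouched
    if any(ch.isspace() for ch in value):
        quote = '"' if '$' in value else "'"
        return '%s%s%s' % (quote, value, quote)
    return value  # simple tokens stay unquoted

# format_sysconfig_value('${ZZ}d apples') -> '"${ZZ}d apples"'
# format_sysconfig_value('timeout=60')    -> 'timeout=60'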
- -from unittest import mock - -from cloudinit import distros -from cloudinit.distros import ug_util -from cloudinit import helpers -from cloudinit import settings - -from cloudinit.tests.helpers import TestCase - - -bcfg = { - 'name': 'bob', - 'plain_text_passwd': 'ubuntu', - 'home': "/home/ubuntu", - 'shell': "/bin/bash", - 'lock_passwd': True, - 'gecos': "Ubuntu", - 'groups': ["foo"] -} - - -class TestUGNormalize(TestCase): - - def setUp(self): - super(TestUGNormalize, self).setUp() - self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') - - def _make_distro(self, dtype, def_user=None): - cfg = dict(settings.CFG_BUILTIN) - cfg['system_info']['distro'] = dtype - paths = helpers.Paths(cfg['system_info']['paths']) - distro_cls = distros.fetch(dtype) - if def_user: - cfg['system_info']['default_user'] = def_user.copy() - distro = distro_cls(dtype, cfg['system_info'], paths) - return distro - - def _norm(self, cfg, distro): - return ug_util.normalize_users_groups(cfg, distro) - - def test_group_dict(self): - distro = self._make_distro('ubuntu') - g = {'groups': - [{'ubuntu': ['foo', 'bar'], - 'bob': 'users'}, - 'cloud-users', - {'bob': 'users2'}]} - (_users, groups) = self._norm(g, distro) - self.assertIn('ubuntu', groups) - ub_members = groups['ubuntu'] - self.assertEqual(sorted(['foo', 'bar']), sorted(ub_members)) - self.assertIn('bob', groups) - b_members = groups['bob'] - self.assertEqual(sorted(['users', 'users2']), - sorted(b_members)) - - def test_basic_groups(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'groups': ['bob'], - } - (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertEqual({}, users) - - def test_csv_groups(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'groups': 'bob,joe,steve', - } - (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertIn('joe', groups) - self.assertIn('steve', groups) - self.assertEqual({}, users) - - def test_more_groups(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'groups': ['bob', 'joe', 'steve'] - } - (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertIn('joe', groups) - self.assertIn('steve', groups) - self.assertEqual({}, users) - - def test_member_groups(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'groups': { - 'bob': ['s'], - 'joe': [], - 'steve': [], - } - } - (users, groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', groups) - self.assertEqual(['s'], groups['bob']) - self.assertEqual([], groups['joe']) - self.assertIn('joe', groups) - self.assertIn('steve', groups) - self.assertEqual({}, users) - - def test_users_simple_dict(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': { - 'default': True, - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - ug_cfg = { - 'users': { - 'default': 'yes', - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - ug_cfg = { - 'users': { - 'default': '1', - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - - def test_users_simple_dict_no(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': { - 'default': False, - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertEqual({}, users) - ug_cfg = { - 'users': { - 'default': 'no', - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertEqual({}, users) - - def test_users_simple_csv(self): - distro = 
self._make_distro('ubuntu') - ug_cfg = { - 'users': 'joe,bob', - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) - - def test_users_simple(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - 'joe', - 'bob' - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) - - def test_users_old_user(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'user': 'zetta', - 'users': 'default' - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertNotIn('bob', users) # Bob is not the default now, zetta is - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - self.assertNotIn('default', users) - ug_cfg = { - 'user': 'zetta', - 'users': 'default, joe' - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertNotIn('bob', users) # Bob is not the default now, zetta is - self.assertIn('joe', users) - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - self.assertNotIn('default', users) - ug_cfg = { - 'user': 'zetta', - 'users': ['bob', 'joe'] - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertIn('joe', users) - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - ug_cfg = { - 'user': 'zetta', - 'users': { - 'bob': True, - 'joe': True, - } - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertIn('joe', users) - self.assertIn('zetta', users) - self.assertTrue(users['zetta']['default']) - ug_cfg = { - 'user': 'zetta', - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('zetta', users) - ug_cfg = {} - (users, groups) = self._norm(ug_cfg, distro) - self.assertEqual({}, users) - self.assertEqual({}, groups) - - def test_users_dict_default_additional(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': [ - {'name': 'default', 'blah': True} - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertEqual(",".join(distro.get_default_user()['groups']), - users['bob']['groups']) - self.assertEqual(True, users['bob']['blah']) - self.assertEqual(True, users['bob']['default']) - - def test_users_dict_extract(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': [ - 'default', - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - (name, config) = ug_util.extract_default(users) - self.assertEqual(name, 'bob') - expected_config = {} - def_config = None - try: - def_config = distro.get_default_user() - except NotImplementedError: - pass - if not def_config: - def_config = {} - expected_config.update(def_config) - - # Ignore these for now - expected_config.pop('name', None) - expected_config.pop('groups', None) - config.pop('groups', None) - self.assertEqual(config, expected_config) - - def test_users_dict_default(self): - distro = self._make_distro('ubuntu', bcfg) - ug_cfg = { - 'users': [ - 'default', - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) - self.assertEqual(",".join(distro.get_default_user()['groups']), - users['bob']['groups']) - self.assertEqual(True, users['bob']['default']) - - def 
test_users_dict_trans(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe', - 'tr-me': True}, - {'name': 'bob'}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'tr_me': True, 'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) - - def test_users_dict(self): - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe'}, - {'name': 'bob'}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('joe', users) - self.assertIn('bob', users) - self.assertEqual({'default': False}, users['joe']) - self.assertEqual({'default': False}, users['bob']) - - @mock.patch('cloudinit.subp.subp') - def test_create_snap_user(self, mock_subp): - mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', - '')] - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe', 'snapuser': 'joe@joe.com'}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - for (user, config) in users.items(): - print('user=%s config=%s' % (user, config)) - username = distro.create_user(user, **config) - - snapcmd = ['snap', 'create-user', '--sudoer', '--json', 'joe@joe.com'] - mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) - self.assertEqual(username, 'joe') - - @mock.patch('cloudinit.subp.subp') - def test_create_snap_user_known(self, mock_subp): - mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', - '')] - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe', 'snapuser': 'joe@joe.com', 'known': True}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - for (user, config) in users.items(): - print('user=%s config=%s' % (user, config)) - username = distro.create_user(user, **config) - - snapcmd = ['snap', 'create-user', '--sudoer', '--json', '--known', - 'joe@joe.com'] - mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) - self.assertEqual(username, 'joe') - - @mock.patch('cloudinit.util.system_is_snappy') - @mock.patch('cloudinit.util.is_group') - @mock.patch('cloudinit.subp.subp') - def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp, - mock_snappy): - mock_isgrp.return_value = False - mock_subp.return_value = True - mock_snappy.return_value = True - distro = self._make_distro('ubuntu') - ug_cfg = { - 'users': [ - {'name': 'joe', 'groups': 'users', 'create_groups': True}, - ], - } - (users, _groups) = self._norm(ug_cfg, distro) - for (user, config) in users.items(): - print('user=%s config=%s' % (user, config)) - distro.add_user(user, **config) - - groupcmd = ['groupadd', 'users', '--extrausers'] - addcmd = ['useradd', 'joe', '--extrausers', '--groups', 'users', '-m'] - - mock_subp.assert_any_call(groupcmd) - mock_subp.assert_any_call(addcmd, logstring=addcmd) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_dmi.py b/tests/unittests/test_dmi.py new file mode 100644 index 00000000..674e7b98 --- /dev/null +++ b/tests/unittests/test_dmi.py @@ -0,0 +1,154 @@ +from tests.unittests import helpers +from cloudinit import dmi +from cloudinit import util +from cloudinit import subp + +import os +import tempfile +import shutil +from unittest import mock + + +class TestReadDMIData(helpers.FilesystemMockingTestCase): + + def setUp(self): + super(TestReadDMIData, self).setUp() + self.new_root = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.new_root) + self.reRoot(self.new_root) + p = 
mock.patch("cloudinit.dmi.is_container", return_value=False) + self.addCleanup(p.stop) + self._m_is_container = p.start() + p = mock.patch("cloudinit.dmi.is_FreeBSD", return_value=False) + self.addCleanup(p.stop) + self._m_is_FreeBSD = p.start() + + def _create_sysfs_parent_directory(self): + util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id')) + + def _create_sysfs_file(self, key, content): + """Mocks the sys path found on Linux systems.""" + self._create_sysfs_parent_directory() + dmi_key = "/sys/class/dmi/id/{0}".format(key) + util.write_file(dmi_key, content) + + def _configure_dmidecode_return(self, key, content, error=None): + """ + In order to test a missing sys path and call outs to dmidecode, this + function fakes the results of dmidecode to test the results. + """ + def _dmidecode_subp(cmd): + if cmd[-1] != key: + raise subp.ProcessExecutionError() + return (content, error) + + self.patched_funcs.enter_context( + mock.patch("cloudinit.dmi.subp.which", side_effect=lambda _: True)) + self.patched_funcs.enter_context( + mock.patch("cloudinit.dmi.subp.subp", side_effect=_dmidecode_subp)) + + def _configure_kenv_return(self, key, content, error=None): + """ + In order to test a FreeBSD system call outs to kenv, this + function fakes the results of kenv to test the results. + """ + def _kenv_subp(cmd): + if cmd[-1] != dmi.DMIDECODE_TO_KERNEL[key].freebsd: + raise subp.ProcessExecutionError() + return (content, error) + + self.patched_funcs.enter_context( + mock.patch("cloudinit.dmi.subp.subp", side_effect=_kenv_subp)) + + def patch_mapping(self, new_mapping): + self.patched_funcs.enter_context( + mock.patch('cloudinit.dmi.DMIDECODE_TO_KERNEL', + new_mapping)) + + def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self): + self.patch_mapping({'mapped-key': dmi.kdmi('mapped-value', None)}) + expected_dmi_value = 'sys-used-correctly' + self._create_sysfs_file('mapped-value', expected_dmi_value) + self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong') + self.assertEqual(expected_dmi_value, dmi.read_dmi_data('mapped-key')) + + def test_dmidecode_used_if_no_sysfs_file_on_disk(self): + self.patch_mapping({}) + self._create_sysfs_parent_directory() + expected_dmi_value = 'dmidecode-used' + self._configure_dmidecode_return('use-dmidecode', expected_dmi_value) + with mock.patch("cloudinit.util.os.uname") as m_uname: + m_uname.return_value = ('x-sysname', 'x-nodename', + 'x-release', 'x-version', 'x86_64') + self.assertEqual(expected_dmi_value, + dmi.read_dmi_data('use-dmidecode')) + + def test_dmidecode_not_used_on_arm(self): + self.patch_mapping({}) + print("current =%s", subp) + self._create_sysfs_parent_directory() + dmi_val = 'from-dmidecode' + dmi_name = 'use-dmidecode' + self._configure_dmidecode_return(dmi_name, dmi_val) + print("now =%s", subp) + + expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val} + found = {} + # we do not run the 'dmi-decode' binary on some arches + # verify that anything requested that is not in the sysfs dir + # will return None on those arches. 
+ with mock.patch("cloudinit.util.os.uname") as m_uname: + for arch in expected: + m_uname.return_value = ('x-sysname', 'x-nodename', + 'x-release', 'x-version', arch) + print("now2 =%s", subp) + found[arch] = dmi.read_dmi_data(dmi_name) + self.assertEqual(expected, found) + + def test_none_returned_if_neither_source_has_data(self): + self.patch_mapping({}) + self._configure_dmidecode_return('key', 'value') + self.assertIsNone(dmi.read_dmi_data('expect-fail')) + + def test_none_returned_if_dmidecode_not_in_path(self): + self.patched_funcs.enter_context( + mock.patch.object(subp, 'which', lambda _: False)) + self.patch_mapping({}) + self.assertIsNone(dmi.read_dmi_data('expect-fail')) + + def test_empty_string_returned_instead_of_foxfox(self): + # uninitialized dmi values show as \xff, return empty string + my_len = 32 + dmi_value = b'\xff' * my_len + b'\n' + expected = "" + dmi_key = 'system-product-name' + sysfs_key = 'product_name' + self._create_sysfs_file(sysfs_key, dmi_value) + self.assertEqual(expected, dmi.read_dmi_data(dmi_key)) + + def test_container_returns_none(self): + """In a container read_dmi_data should always return None.""" + + # first verify we get the value if not in container + self._m_is_container.return_value = False + key, val = ("system-product-name", "my_product") + self._create_sysfs_file('product_name', val) + self.assertEqual(val, dmi.read_dmi_data(key)) + + # then verify in container returns None + self._m_is_container.return_value = True + self.assertIsNone(dmi.read_dmi_data(key)) + + def test_container_returns_none_on_unknown(self): + """In a container even bogus keys return None.""" + self._m_is_container.return_value = True + self._create_sysfs_file('product_name', "should-be-ignored") + self.assertIsNone(dmi.read_dmi_data("bogus")) + self.assertIsNone(dmi.read_dmi_data("system-product-name")) + + def test_freebsd_uses_kenv(self): + """On a FreeBSD system, kenv is called.""" + self._m_is_FreeBSD.return_value = True + key, val = ("system-product-name", "my_product") + self._configure_kenv_return(key, val) + self.assertEqual(dmi.read_dmi_data(key), val) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 43603ea5..62c3e403 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -8,7 +8,7 @@ from uuid import uuid4 from cloudinit import safeyaml from cloudinit import subp from cloudinit import util -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, dir2dict, populate_dir, populate_dir_with_ts) from cloudinit.sources import DataSourceIBMCloud as ds_ibm diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py index 3f50f57d..e8e0b5b1 100644 --- a/tests/unittests/test_ec2_util.py +++ b/tests/unittests/test_ec2_util.py @@ -2,7 +2,7 @@ import httpretty as hp -from cloudinit.tests import helpers +from tests.unittests import helpers from cloudinit import ec2_utils as eu from cloudinit import url_helper as uh diff --git a/tests/unittests/test_event.py b/tests/unittests/test_event.py new file mode 100644 index 00000000..3da4c70c --- /dev/null +++ b/tests/unittests/test_event.py @@ -0,0 +1,26 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
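The test_dmi.py cases added above pin down a precedence order for dmi.read_dmi_data: containers never report DMI data, FreeBSD queries kenv(1), sysfs wins when the mapped file exists, and the dmidecode binary is only a fallback on architectures where it is expected to work. A condensed sketch of that order, with the probe callables injected so it stays runnable; the parameter names and helper wiring are illustrative, not cloud-init's actual internals:

import os

def read_dmi_data_sketch(key, is_container, is_freebsd,
                         sysfs_read, kenv_read, dmidecode_read):
    # Precedence the tests above assert, in order.
    if is_container():
        return None                 # containers: DMI data is never trusted
    if is_freebsd():
        return kenv_read(key)       # FreeBSD exposes DMI values via kenv(1)
    value = sysfs_read(key)         # /sys/class/dmi/id/<mapped-key> wins
    if value is not None:
        return value
    # Only shell out to dmidecode on arches where it can work; e.g. armel
    # falls through to None without ever invoking the binary.
    if os.uname()[4] in ('x86_64', 'i586', 'i686', 'amd64', 'aarch64'):
        return dmidecode_read(key)
    return None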
+"""Tests related to cloudinit.event module.""" +from cloudinit.event import EventType, EventScope, userdata_to_events + + +class TestEvent: + def test_userdata_to_events(self): + userdata = {'network': {'when': ['boot']}} + expected = {EventScope.NETWORK: {EventType.BOOT}} + assert expected == userdata_to_events(userdata) + + def test_invalid_scope(self, caplog): + userdata = {'networkasdfasdf': {'when': ['boot']}} + userdata_to_events(userdata) + assert ( + "'networkasdfasdf' is not a valid EventScope! Update data " + "will be ignored for 'networkasdfasdf' scope" + ) in caplog.text + + def test_invalid_event(self, caplog): + userdata = {'network': {'when': ['bootasdfasdf']}} + userdata_to_events(userdata) + assert ( + "'bootasdfasdf' is not a valid EventType! Update data " + "will be ignored for 'network' scope" + ) in caplog.text diff --git a/tests/unittests/test_features.py b/tests/unittests/test_features.py new file mode 100644 index 00000000..d7a7226d --- /dev/null +++ b/tests/unittests/test_features.py @@ -0,0 +1,60 @@ +# This file is part of cloud-init. See LICENSE file for license information. +# pylint: disable=no-member,no-name-in-module +""" +This file is for testing the feature flag functionality itself, +NOT for testing any individual feature flag +""" +import pytest +import sys +from pathlib import Path + +import cloudinit + + +@pytest.yield_fixture() +def create_override(request): + """ + Create a feature overrides file and do some module wizardry to make + it seem like we're importing the features file for the first time. + + After creating the override file with the values passed by the test, + we need to reload cloudinit.features + to get all of the current features (including the overridden ones). + Once the test is complete, we remove the file we created and set + features and feature_overrides modules to how they were before + the test started + """ + override_path = Path(cloudinit.__file__).parent / 'feature_overrides.py' + if override_path.exists(): + raise Exception("feature_overrides.py unexpectedly exists! " + "Remove it to run this test.") + with override_path.open('w') as f: + for key, value in request.param.items(): + f.write('{} = {}\n'.format(key, value)) + + sys.modules.pop('cloudinit.features', None) + + yield + + override_path.unlink() + sys.modules.pop('cloudinit.feature_overrides', None) + + +class TestFeatures: + def test_feature_without_override(self): + from cloudinit.features import ERROR_ON_USER_DATA_FAILURE + assert ERROR_ON_USER_DATA_FAILURE is True + + @pytest.mark.parametrize('create_override', + [{'ERROR_ON_USER_DATA_FAILURE': False}], + indirect=True) + def test_feature_with_override(self, create_override): + from cloudinit.features import ERROR_ON_USER_DATA_FAILURE + assert ERROR_ON_USER_DATA_FAILURE is False + + @pytest.mark.parametrize('create_override', + [{'SPAM': True}], + indirect=True) + def test_feature_only_in_override(self, create_override): + from cloudinit.features import SPAM + assert SPAM is True diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/test_filters/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py deleted file mode 100644 index 1492361e..00000000 --- a/tests/unittests/test_filters/test_launch_index.py +++ /dev/null @@ -1,135 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import copy -from itertools import filterfalse - -from cloudinit.tests import helpers - -from cloudinit.filters import launch_index -from cloudinit import user_data as ud -from cloudinit import util - - -def count_messages(root): - am = 0 - for m in root.walk(): - if ud.is_skippable(m): - continue - am += 1 - return am - - -class TestLaunchFilter(helpers.ResourceUsingTestCase): - - def assertCounts(self, message, expected_counts): - orig_message = copy.deepcopy(message) - for (index, count) in expected_counts.items(): - index = util.safe_int(index) - filtered_message = launch_index.Filter(index).apply(message) - self.assertEqual(count_messages(filtered_message), count) - # Ensure original message still ok/not modified - self.assertTrue(self.equivalentMessage(message, orig_message)) - - def equivalentMessage(self, msg1, msg2): - msg1_count = count_messages(msg1) - msg2_count = count_messages(msg2) - if msg1_count != msg2_count: - return False - # Do some basic payload checking - msg1_msgs = [m for m in msg1.walk()] - msg1_msgs = [m for m in filterfalse(ud.is_skippable, msg1_msgs)] - msg2_msgs = [m for m in msg2.walk()] - msg2_msgs = [m for m in filterfalse(ud.is_skippable, msg2_msgs)] - for i in range(0, len(msg2_msgs)): - m1_msg = msg1_msgs[i] - m2_msg = msg2_msgs[i] - if m1_msg.get_charset() != m2_msg.get_charset(): - return False - if m1_msg.is_multipart() != m2_msg.is_multipart(): - return False - m1_py = m1_msg.get_payload(decode=True) - m2_py = m2_msg.get_payload(decode=True) - if m1_py != m2_py: - return False - return True - - def testMultiEmailIndex(self): - test_data = helpers.readResource('filter_cloud_multipart_2.email') - ud_proc = ud.UserDataProcessor(self.getCloudPaths()) - message = ud_proc.process(test_data) - self.assertTrue(count_messages(message) > 0) - # This file should have the following - # indexes -> amount mapping in it - expected_counts = { - 3: 1, - 2: 2, - None: 3, - -1: 0, - } - self.assertCounts(message, expected_counts) - - def testHeaderEmailIndex(self): - test_data = helpers.readResource('filter_cloud_multipart_header.email') - ud_proc = ud.UserDataProcessor(self.getCloudPaths()) - message = ud_proc.process(test_data) - self.assertTrue(count_messages(message) > 0) - # This file should have the following - # indexes -> amount mapping in it - expected_counts = { - 5: 1, - -1: 0, - 'c': 1, - None: 1, - } - self.assertCounts(message, expected_counts) - - def testConfigEmailIndex(self): - test_data = helpers.readResource('filter_cloud_multipart_1.email') - ud_proc = ud.UserDataProcessor(self.getCloudPaths()) - message = ud_proc.process(test_data) - self.assertTrue(count_messages(message) > 0) - # This file should have the following - # indexes -> amount mapping in it - expected_counts = { - 2: 1, - -1: 0, - None: 1, - } - self.assertCounts(message, expected_counts) - - def testNoneIndex(self): - test_data = helpers.readResource('filter_cloud_multipart.yaml') - ud_proc = ud.UserDataProcessor(self.getCloudPaths()) - message = ud_proc.process(test_data) - start_count = count_messages(message) - self.assertTrue(start_count > 0) - filtered_message = launch_index.Filter(None).apply(message) - self.assertTrue(self.equivalentMessage(message, filtered_message)) - - def testIndexes(self): - test_data = helpers.readResource('filter_cloud_multipart.yaml') - ud_proc = ud.UserDataProcessor(self.getCloudPaths()) - message = ud_proc.process(test_data) - start_count = count_messages(message) - self.assertTrue(start_count > 0) - # This file should have the following - # 
indexes -> amount mapping in it - expected_counts = { - 2: 2, - 3: 2, - 1: 2, - 0: 1, - 4: 1, - 7: 0, - -1: 0, - 100: 0, - # None should just give all back - None: start_count, - # Non ints should be ignored - 'c': start_count, - # Strings should be converted - '1': 2, - } - self.assertCounts(message, expected_counts) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_gpg.py b/tests/unittests/test_gpg.py index 451ffa91..ceada49a 100644 --- a/tests/unittests/test_gpg.py +++ b/tests/unittests/test_gpg.py @@ -4,6 +4,8 @@ from unittest import mock from cloudinit import gpg from cloudinit import subp +from tests.unittests.helpers import CiTestCase + TEST_KEY_HUMAN = ''' /etc/apt/cloud-init.gpg.d/my_key.gpg -------------------------------------------- @@ -79,3 +81,50 @@ class TestGPGCommands: test_call = mock.call( ["gpg", "--dearmor"], data='key', decode=False) assert test_call == m_subp.call_args + + @mock.patch("cloudinit.gpg.time.sleep") + @mock.patch("cloudinit.gpg.subp.subp") + class TestReceiveKeys(CiTestCase): + """Test the recv_key method.""" + + def test_retries_on_subp_exc(self, m_subp, m_sleep): + """retry should be done on gpg receive keys failure.""" + retries = (1, 2, 4) + my_exc = subp.ProcessExecutionError( + stdout='', stderr='', exit_code=2, cmd=['mycmd']) + m_subp.side_effect = (my_exc, my_exc, ('', '')) + gpg.recv_key("ABCD", "keyserver.example.com", retries=retries) + self.assertEqual( + [mock.call(1), mock.call(2)], m_sleep.call_args_list) + + def test_raises_error_after_retries(self, m_subp, m_sleep): + """If the final run fails, error should be raised.""" + naplen = 1 + keyid, keyserver = ("ABCD", "keyserver.example.com") + m_subp.side_effect = subp.ProcessExecutionError( + stdout='', stderr='', exit_code=2, cmd=['mycmd']) + with self.assertRaises(ValueError) as rcm: + gpg.recv_key(keyid, keyserver, retries=(naplen,)) + self.assertIn(keyid, str(rcm.exception)) + self.assertIn(keyserver, str(rcm.exception)) + m_sleep.assert_called_with(naplen) + + def test_no_retries_on_none(self, m_subp, m_sleep): + """retry should not be done if retries is None.""" + m_subp.side_effect = subp.ProcessExecutionError( + stdout='', stderr='', exit_code=2, cmd=['mycmd']) + with self.assertRaises(ValueError): + gpg.recv_key("ABCD", "keyserver.example.com", retries=None) + m_sleep.assert_not_called() + + def test_expected_gpg_command(self, m_subp, m_sleep): + """Verify gpg is called with expected args.""" + key, keyserver = ("DEADBEEF", "keyserver.example.com") + retries = (1, 2, 4) + m_subp.return_value = ('', '') + gpg.recv_key(key, keyserver, retries=retries) + m_subp.assert_called_once_with( + ['gpg', '--no-tty', + '--keyserver=%s' % keyserver, '--recv-keys', key], + capture=True) + m_sleep.assert_not_called() diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/test_handler/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/unittests/test_handler/test_handler_apk_configure.py b/tests/unittests/test_handler/test_handler_apk_configure.py deleted file mode 100644 index 8acc0b33..00000000 --- a/tests/unittests/test_handler/test_handler_apk_configure.py +++ /dev/null @@ -1,299 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
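The TestReceiveKeys cases added above fix a simple contract for gpg.recv_key: one attempt per entry in retries plus a final attempt, sleeping the given number of seconds after each failure, raising once every attempt has failed, and trying exactly once when retries is None. A retry loop consistent with those assertions; this is a sketch only, run_gpg is an injected stand-in for subp.subp, and the ValueError text is illustrative (the tests merely require the key id and keyserver to appear in it):

import time

def recv_key_sketch(run_gpg, keyid, keyserver, retries=None):
    naplens = list(retries) if retries else []
    for naplen in naplens + [None]:          # None marks the final attempt
        try:
            # same invocation the tests assert subp.subp receives
            return run_gpg(['gpg', '--no-tty',
                            '--keyserver=%s' % keyserver,
                            '--recv-keys', keyid])
        except Exception:
            if naplen is None:               # retries exhausted
                raise ValueError(
                    'Failed to import key %s from server %s'
                    % (keyid, keyserver))
            time.sleep(naplen)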
- -""" test_apk_configure -Test creation of repositories file -""" - -import logging -import os -import textwrap - -from cloudinit import (cloud, helpers, util) - -from cloudinit.config import cc_apk_configure -from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock) - -REPO_FILE = "/etc/apk/repositories" -DEFAULT_MIRROR_URL = "https://alpine.global.ssl.fastly.net/alpine" -CC_APK = 'cloudinit.config.cc_apk_configure' - - -class TestNoConfig(FilesystemMockingTestCase): - def setUp(self): - super(TestNoConfig, self).setUp() - self.add_patch(CC_APK + '._write_repositories_file', 'm_write_repos') - self.name = "apk-configure" - self.cloud_init = None - self.log = logging.getLogger("TestNoConfig") - self.args = [] - - def test_no_config(self): - """ - Test that nothing is done if no apk-configure - configuration is provided. - """ - config = util.get_builtin_cfg() - - cc_apk_configure.handle(self.name, config, self.cloud_init, - self.log, self.args) - - self.assertEqual(0, self.m_write_repos.call_count) - - -class TestConfig(FilesystemMockingTestCase): - def setUp(self): - super(TestConfig, self).setUp() - self.new_root = self.tmp_dir() - self.new_root = self.reRoot(root=self.new_root) - for dirname in ['tmp', 'etc/apk']: - util.ensure_dir(os.path.join(self.new_root, dirname)) - self.paths = helpers.Paths({'templates_dir': self.new_root}) - self.name = "apk-configure" - self.cloud = cloud.Cloud(None, self.paths, None, None, None) - self.log = logging.getLogger("TestNoConfig") - self.args = [] - - @mock.patch(CC_APK + '._write_repositories_file') - def test_no_repo_settings(self, m_write_repos): - """ - Test that nothing is written if the 'alpine-repo' key - is not present. - """ - config = {"apk_repos": {}} - - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) - - self.assertEqual(0, m_write_repos.call_count) - - @mock.patch(CC_APK + '._write_repositories_file') - def test_empty_repo_settings(self, m_write_repos): - """ - Test that nothing is written if 'alpine_repo' list is empty. - """ - config = {"apk_repos": {"alpine_repo": []}} - - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) - - self.assertEqual(0, m_write_repos.call_count) - - def test_only_main_repo(self): - """ - Test when only details of main repo is written to file. - """ - alpine_version = 'v3.12' - config = { - "apk_repos": { - "alpine_repo": { - "version": alpine_version - } - } - } - - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) - - expected_content = textwrap.dedent("""\ - # - # Created by cloud-init - # - # This file is written on first boot of an instance - # - - {0}/{1}/main - - """.format(DEFAULT_MIRROR_URL, alpine_version)) - - self.assertEqual(expected_content, util.load_file(REPO_FILE)) - - def test_main_and_community_repos(self): - """ - Test when only details of main and community repos are - written to file. 
- """ - alpine_version = 'edge' - config = { - "apk_repos": { - "alpine_repo": { - "version": alpine_version, - "community_enabled": True - } - } - } - - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) - - expected_content = textwrap.dedent("""\ - # - # Created by cloud-init - # - # This file is written on first boot of an instance - # - - {0}/{1}/main - {0}/{1}/community - - """.format(DEFAULT_MIRROR_URL, alpine_version)) - - self.assertEqual(expected_content, util.load_file(REPO_FILE)) - - def test_main_community_testing_repos(self): - """ - Test when details of main, community and testing repos - are written to file. - """ - alpine_version = 'v3.12' - config = { - "apk_repos": { - "alpine_repo": { - "version": alpine_version, - "community_enabled": True, - "testing_enabled": True - } - } - } - - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) - - expected_content = textwrap.dedent("""\ - # - # Created by cloud-init - # - # This file is written on first boot of an instance - # - - {0}/{1}/main - {0}/{1}/community - # - # Testing - using with non-Edge installation may cause problems! - # - {0}/edge/testing - - """.format(DEFAULT_MIRROR_URL, alpine_version)) - - self.assertEqual(expected_content, util.load_file(REPO_FILE)) - - def test_edge_main_community_testing_repos(self): - """ - Test when details of main, community and testing repos - for Edge version of Alpine are written to file. - """ - alpine_version = 'edge' - config = { - "apk_repos": { - "alpine_repo": { - "version": alpine_version, - "community_enabled": True, - "testing_enabled": True - } - } - } - - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) - - expected_content = textwrap.dedent("""\ - # - # Created by cloud-init - # - # This file is written on first boot of an instance - # - - {0}/{1}/main - {0}/{1}/community - {0}/{1}/testing - - """.format(DEFAULT_MIRROR_URL, alpine_version)) - - self.assertEqual(expected_content, util.load_file(REPO_FILE)) - - def test_main_community_testing_local_repos(self): - """ - Test when details of main, community, testing and - local repos are written to file. - """ - alpine_version = 'v3.12' - local_repo_url = 'http://some.mirror/whereever' - config = { - "apk_repos": { - "alpine_repo": { - "version": alpine_version, - "community_enabled": True, - "testing_enabled": True - }, - "local_repo_base_url": local_repo_url - } - } - - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) - - expected_content = textwrap.dedent("""\ - # - # Created by cloud-init - # - # This file is written on first boot of an instance - # - - {0}/{1}/main - {0}/{1}/community - # - # Testing - using with non-Edge installation may cause problems! - # - {0}/edge/testing - - # - # Local repo - # - {2}/{1} - - """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url)) - - self.assertEqual(expected_content, util.load_file(REPO_FILE)) - - def test_edge_main_community_testing_local_repos(self): - """ - Test when details of main, community, testing and local repos - for Edge version of Alpine are written to file. 
- """ - alpine_version = 'edge' - local_repo_url = 'http://some.mirror/whereever' - config = { - "apk_repos": { - "alpine_repo": { - "version": alpine_version, - "community_enabled": True, - "testing_enabled": True - }, - "local_repo_base_url": local_repo_url - } - } - - cc_apk_configure.handle(self.name, config, self.cloud, self.log, - self.args) - - expected_content = textwrap.dedent("""\ - # - # Created by cloud-init - # - # This file is written on first boot of an instance - # - - {0}/{1}/main - {0}/{1}/community - {0}/edge/testing - - # - # Local repo - # - {2}/{1} - - """.format(DEFAULT_MIRROR_URL, alpine_version, local_repo_url)) - - self.assertEqual(expected_content, util.load_file(REPO_FILE)) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/test_handler/test_handler_apt_conf_v1.py deleted file mode 100644 index 6a4b03ee..00000000 --- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py +++ /dev/null @@ -1,129 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.config import cc_apt_configure -from cloudinit import util - -from cloudinit.tests.helpers import TestCase - -import copy -import os -import re -import shutil -import tempfile - - -class TestAptProxyConfig(TestCase): - def setUp(self): - super(TestAptProxyConfig, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.pfile = os.path.join(self.tmp, "proxy.cfg") - self.cfile = os.path.join(self.tmp, "config.cfg") - - def _search_apt_config(self, contents, ptype, value): - return re.search( - r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value), - contents, flags=re.IGNORECASE) - - def test_apt_proxy_written(self): - cfg = {'proxy': 'myproxy'} - cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) - - self.assertTrue(os.path.isfile(self.pfile)) - self.assertFalse(os.path.isfile(self.cfile)) - - contents = util.load_file(self.pfile) - self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) - - def test_apt_http_proxy_written(self): - cfg = {'http_proxy': 'myproxy'} - cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) - - self.assertTrue(os.path.isfile(self.pfile)) - self.assertFalse(os.path.isfile(self.cfile)) - - contents = util.load_file(self.pfile) - self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) - - def test_apt_all_proxy_written(self): - cfg = {'http_proxy': 'myproxy_http_proxy', - 'https_proxy': 'myproxy_https_proxy', - 'ftp_proxy': 'myproxy_ftp_proxy'} - - values = {'http': cfg['http_proxy'], - 'https': cfg['https_proxy'], - 'ftp': cfg['ftp_proxy'], - } - - cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) - - self.assertTrue(os.path.isfile(self.pfile)) - self.assertFalse(os.path.isfile(self.cfile)) - - contents = util.load_file(self.pfile) - - for ptype, pval in values.items(): - self.assertTrue(self._search_apt_config(contents, ptype, pval)) - - def test_proxy_deleted(self): - util.write_file(self.cfile, "content doesnt matter") - cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile) - self.assertFalse(os.path.isfile(self.pfile)) - self.assertFalse(os.path.isfile(self.cfile)) - - def test_proxy_replaced(self): - util.write_file(self.cfile, "content doesnt matter") - cc_apt_configure.apply_apt_config({'proxy': "foo"}, - self.pfile, self.cfile) - self.assertTrue(os.path.isfile(self.pfile)) - contents = util.load_file(self.pfile) - 
self.assertTrue(self._search_apt_config(contents, "http", "foo")) - - def test_config_written(self): - payload = 'this is my apt config' - cfg = {'conf': payload} - - cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile) - - self.assertTrue(os.path.isfile(self.cfile)) - self.assertFalse(os.path.isfile(self.pfile)) - - self.assertEqual(util.load_file(self.cfile), payload) - - def test_config_replaced(self): - util.write_file(self.pfile, "content doesnt matter") - cc_apt_configure.apply_apt_config({'conf': "foo"}, - self.pfile, self.cfile) - self.assertTrue(os.path.isfile(self.cfile)) - self.assertEqual(util.load_file(self.cfile), "foo") - - def test_config_deleted(self): - # if no 'conf' is provided, delete any previously written file - util.write_file(self.pfile, "content doesnt matter") - cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile) - self.assertFalse(os.path.isfile(self.pfile)) - self.assertFalse(os.path.isfile(self.cfile)) - - -class TestConversion(TestCase): - def test_convert_with_apt_mirror_as_empty_string(self): - # an empty apt_mirror is the same as no apt_mirror - empty_m_found = cc_apt_configure.convert_to_v3_apt_format( - {'apt_mirror': ''}) - default_found = cc_apt_configure.convert_to_v3_apt_format({}) - self.assertEqual(default_found, empty_m_found) - - def test_convert_with_apt_mirror(self): - mirror = 'http://my.mirror/ubuntu' - f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror}) - self.assertIn(mirror, set(m['uri'] for m in f['apt']['primary'])) - - def test_no_old_content(self): - mirror = 'http://my.mirror/ubuntu' - mydata = {'apt': {'primary': {'arches': ['default'], 'uri': mirror}}} - expected = copy.deepcopy(mydata) - self.assertEqual(expected, - cc_apt_configure.convert_to_v3_apt_format(mydata)) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py deleted file mode 100644 index d69916f9..00000000 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v1.py +++ /dev/null @@ -1,181 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -""" test_handler_apt_configure_sources_list -Test templating of sources list -""" -import logging -import os -import shutil -import tempfile -from unittest import mock - -from cloudinit import templater -from cloudinit import subp -from cloudinit import util - -from cloudinit.config import cc_apt_configure - -from cloudinit.distros.debian import Distro - -from cloudinit.tests import helpers as t_help -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - -YAML_TEXT_CUSTOM_SL = """ -apt_mirror: http://archive.ubuntu.com/ubuntu/ -apt_custom_sources_list: | - ## template:jinja - ## Note, this file is written by cloud-init on first boot of an instance - ## modifications made here will not survive a re-bundle. - ## if you wish to make changes you can: - ## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg - ## or do the same in user-data - ## b.) add sources in /etc/apt/sources.list.d - ## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl - - # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to - # newer versions of the distribution. 
- deb {{mirror}} {{codename}} main restricted - deb-src {{mirror}} {{codename}} main restricted - # FIND_SOMETHING_SPECIAL -""" - -EXPECTED_CONVERTED_CONTENT = ( - """## Note, this file is written by cloud-init on first boot of an instance -## modifications made here will not survive a re-bundle. -## if you wish to make changes you can: -## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg -## or do the same in user-data -## b.) add sources in /etc/apt/sources.list.d -## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl - -# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to -# newer versions of the distribution. -deb http://archive.ubuntu.com/ubuntu/ fakerelease main restricted -deb-src http://archive.ubuntu.com/ubuntu/ fakerelease main restricted -# FIND_SOMETHING_SPECIAL -""") - - -class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): - """TestAptSourceConfigSourceList - Main Class to test sources list rendering - """ - def setUp(self): - super(TestAptSourceConfigSourceList, self).setUp() - self.subp = subp.subp - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - - rpatcher = mock.patch("cloudinit.util.lsb_release") - get_rel = rpatcher.start() - get_rel.return_value = {'codename': "fakerelease"} - self.addCleanup(rpatcher.stop) - apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") - get_arch = apatcher.start() - get_arch.return_value = 'amd64' - self.addCleanup(apatcher.stop) - - def apt_source_list(self, distro, mirror, mirrorcheck=None): - """apt_source_list - Test rendering of a source.list from template for a given distro - """ - if mirrorcheck is None: - mirrorcheck = mirror - - if isinstance(mirror, list): - cfg = {'apt_mirror_search': mirror} - else: - cfg = {'apt_mirror': mirror} - - mycloud = get_cloud(distro) - - with mock.patch.object(util, 'write_file') as mockwf: - with mock.patch.object(util, 'load_file', - return_value="faketmpl") as mocklf: - with mock.patch.object(os.path, 'isfile', - return_value=True) as mockisfile: - with mock.patch.object( - templater, 'render_string', - return_value='fake') as mockrnd: - with mock.patch.object(util, 'rename'): - cc_apt_configure.handle("test", cfg, mycloud, - LOG, None) - - mockisfile.assert_any_call( - ('/etc/cloud/templates/sources.list.%s.tmpl' % distro)) - mocklf.assert_any_call( - ('/etc/cloud/templates/sources.list.%s.tmpl' % distro)) - mockrnd.assert_called_once_with('faketmpl', - {'RELEASE': 'fakerelease', - 'PRIMARY': mirrorcheck, - 'MIRROR': mirrorcheck, - 'SECURITY': mirrorcheck, - 'codename': 'fakerelease', - 'primary': mirrorcheck, - 'mirror': mirrorcheck, - 'security': mirrorcheck}) - mockwf.assert_called_once_with('/etc/apt/sources.list', 'fake', - mode=0o644) - - def test_apt_v1_source_list_debian(self): - """Test rendering of a source.list from template for debian""" - self.apt_source_list('debian', 'http://httpredir.debian.org/debian') - - def test_apt_v1_source_list_ubuntu(self): - """Test rendering of a source.list from template for ubuntu""" - self.apt_source_list('ubuntu', 'http://archive.ubuntu.com/ubuntu/') - - @staticmethod - def myresolve(name): - """Fake util.is_resolvable for mirrorfail tests""" - if name == "does.not.exist": - print("Faking FAIL for '%s'" % name) - return False - else: - print("Faking SUCCESS for '%s'" % name) - return True - - def test_apt_v1_srcl_debian_mirrorfail(self): - """Test rendering of a source.list from template for debian""" - with 
mock.patch.object(util, 'is_resolvable', - side_effect=self.myresolve) as mockresolve: - self.apt_source_list('debian', - ['http://does.not.exist', - 'http://httpredir.debian.org/debian'], - 'http://httpredir.debian.org/debian') - mockresolve.assert_any_call("does.not.exist") - mockresolve.assert_any_call("httpredir.debian.org") - - def test_apt_v1_srcl_ubuntu_mirrorfail(self): - """Test rendering of a source.list from template for ubuntu""" - with mock.patch.object(util, 'is_resolvable', - side_effect=self.myresolve) as mockresolve: - self.apt_source_list('ubuntu', - ['http://does.not.exist', - 'http://archive.ubuntu.com/ubuntu/'], - 'http://archive.ubuntu.com/ubuntu/') - mockresolve.assert_any_call("does.not.exist") - mockresolve.assert_any_call("archive.ubuntu.com") - - def test_apt_v1_srcl_custom(self): - """Test rendering from a custom source.list template""" - cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL) - mycloud = get_cloud() - - # the second mock restores the original subp - with mock.patch.object(util, 'write_file') as mockwrite: - with mock.patch.object(subp, 'subp', self.subp): - with mock.patch.object(Distro, 'get_primary_arch', - return_value='amd64'): - cc_apt_configure.handle("notimportant", cfg, mycloud, - LOG, None) - - mockwrite.assert_called_once_with( - '/etc/apt/sources.list', - EXPECTED_CONVERTED_CONTENT, - mode=420) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py b/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py deleted file mode 100644 index cd6f9239..00000000 --- a/tests/unittests/test_handler/test_handler_apt_configure_sources_list_v3.py +++ /dev/null @@ -1,226 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -""" test_apt_custom_sources_list -Test templating of custom sources list -""" -from contextlib import ExitStack -import logging -import os -import shutil -import tempfile -from unittest import mock -from unittest.mock import call - -from cloudinit import subp -from cloudinit import util -from cloudinit.config import cc_apt_configure -from cloudinit.distros.debian import Distro -from cloudinit.tests import helpers as t_help - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - -TARGET = "/" - -# Input and expected output for the custom template -YAML_TEXT_CUSTOM_SL = """ -apt: - primary: - - arches: [default] - uri: http://test.ubuntu.com/ubuntu/ - security: - - arches: [default] - uri: http://testsec.ubuntu.com/ubuntu/ - sources_list: | - - # Note, this file is written by cloud-init at install time. It should not - # end up on the installed system itself. - # See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to - # newer versions of the distribution. - deb $MIRROR $RELEASE main restricted - deb-src $MIRROR $RELEASE main restricted - deb $PRIMARY $RELEASE universe restricted - deb $SECURITY $RELEASE-security multiverse - # FIND_SOMETHING_SPECIAL -""" - -EXPECTED_CONVERTED_CONTENT = """ -# Note, this file is written by cloud-init at install time. It should not -# end up on the installed system itself. -# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to -# newer versions of the distribution. 
-deb http://test.ubuntu.com/ubuntu/ fakerel main restricted -deb-src http://test.ubuntu.com/ubuntu/ fakerel main restricted -deb http://test.ubuntu.com/ubuntu/ fakerel universe restricted -deb http://testsec.ubuntu.com/ubuntu/ fakerel-security multiverse -# FIND_SOMETHING_SPECIAL -""" - -# mocked to be independent to the unittest system -MOCKED_APT_SRC_LIST = """ -deb http://test.ubuntu.com/ubuntu/ notouched main restricted -deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted -deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted -deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted -""" - -EXPECTED_BASE_CONTENT = (""" -deb http://test.ubuntu.com/ubuntu/ notouched main restricted -deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted -deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted -deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted -""") - -EXPECTED_MIRROR_CONTENT = (""" -deb http://test.ubuntu.com/ubuntu/ notouched main restricted -deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted -deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted -deb http://test.ubuntu.com/ubuntu/ notouched-security main restricted -""") - -EXPECTED_PRIMSEC_CONTENT = (""" -deb http://test.ubuntu.com/ubuntu/ notouched main restricted -deb-src http://test.ubuntu.com/ubuntu/ notouched main restricted -deb http://test.ubuntu.com/ubuntu/ notouched-updates main restricted -deb http://testsec.ubuntu.com/ubuntu/ notouched-security main restricted -""") - - -class TestAptSourceConfigSourceList(t_help.FilesystemMockingTestCase): - """TestAptSourceConfigSourceList - Class to test sources list rendering""" - def setUp(self): - super(TestAptSourceConfigSourceList, self).setUp() - self.subp = subp.subp - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - - rpatcher = mock.patch("cloudinit.util.lsb_release") - get_rel = rpatcher.start() - get_rel.return_value = {'codename': "fakerel"} - self.addCleanup(rpatcher.stop) - apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") - get_arch = apatcher.start() - get_arch.return_value = 'amd64' - self.addCleanup(apatcher.stop) - - def _apt_source_list(self, distro, cfg, cfg_on_empty=False): - """_apt_source_list - Test rendering from template (generic)""" - # entry at top level now, wrap in 'apt' key - cfg = {'apt': cfg} - mycloud = get_cloud(distro) - - with ExitStack() as stack: - mock_writefile = stack.enter_context(mock.patch.object( - util, 'write_file')) - mock_loadfile = stack.enter_context(mock.patch.object( - util, 'load_file', return_value=MOCKED_APT_SRC_LIST)) - mock_isfile = stack.enter_context(mock.patch.object( - os.path, 'isfile', return_value=True)) - stack.enter_context(mock.patch.object( - util, 'del_file')) - cfg_func = ('cloudinit.config.cc_apt_configure.' 
- '_should_configure_on_empty_apt') - mock_shouldcfg = stack.enter_context(mock.patch( - cfg_func, return_value=(cfg_on_empty, 'test') - )) - cc_apt_configure.handle("test", cfg, mycloud, LOG, None) - - return mock_writefile, mock_loadfile, mock_isfile, mock_shouldcfg - - def test_apt_v3_source_list_debian(self): - """test_apt_v3_source_list_debian - without custom sources or parms""" - cfg = {} - distro = 'debian' - expected = EXPECTED_BASE_CONTENT - - mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = ( - self._apt_source_list(distro, cfg, cfg_on_empty=True)) - - template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro - mock_writefile.assert_called_once_with('/etc/apt/sources.list', - expected, mode=0o644) - mock_load_file.assert_called_with(template) - mock_isfile.assert_any_call(template) - self.assertEqual(1, mock_shouldcfg.call_count) - - def test_apt_v3_source_list_ubuntu(self): - """test_apt_v3_source_list_ubuntu - without custom sources or parms""" - cfg = {} - distro = 'ubuntu' - expected = EXPECTED_BASE_CONTENT - - mock_writefile, mock_load_file, mock_isfile, mock_shouldcfg = ( - self._apt_source_list(distro, cfg, cfg_on_empty=True)) - - template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro - mock_writefile.assert_called_once_with('/etc/apt/sources.list', - expected, mode=0o644) - mock_load_file.assert_called_with(template) - mock_isfile.assert_any_call(template) - self.assertEqual(1, mock_shouldcfg.call_count) - - def test_apt_v3_source_list_ubuntu_snappy(self): - """test_apt_v3_source_list_ubuntu_snappy - without custom sources or - parms""" - cfg = {'apt': {}} - mycloud = get_cloud() - - with mock.patch.object(util, 'write_file') as mock_writefile: - with mock.patch.object(util, 'system_is_snappy', - return_value=True) as mock_issnappy: - cc_apt_configure.handle("test", cfg, mycloud, LOG, None) - - self.assertEqual(0, mock_writefile.call_count) - self.assertEqual(1, mock_issnappy.call_count) - - def test_apt_v3_source_list_centos(self): - """test_apt_v3_source_list_centos - without custom sources or parms""" - cfg = {} - distro = 'rhel' - - mock_writefile, _, _, _ = self._apt_source_list(distro, cfg) - - self.assertEqual(0, mock_writefile.call_count) - - def test_apt_v3_source_list_psm(self): - """test_apt_v3_source_list_psm - Test specifying prim+sec mirrors""" - pm = 'http://test.ubuntu.com/ubuntu/' - sm = 'http://testsec.ubuntu.com/ubuntu/' - cfg = {'preserve_sources_list': False, - 'primary': [{'arches': ["default"], - 'uri': pm}], - 'security': [{'arches': ["default"], - 'uri': sm}]} - distro = 'ubuntu' - expected = EXPECTED_PRIMSEC_CONTENT - - mock_writefile, mock_load_file, mock_isfile, _ = ( - self._apt_source_list(distro, cfg, cfg_on_empty=True)) - - template = '/etc/cloud/templates/sources.list.%s.tmpl' % distro - mock_writefile.assert_called_once_with('/etc/apt/sources.list', - expected, mode=0o644) - mock_load_file.assert_called_with(template) - mock_isfile.assert_any_call(template) - - def test_apt_v3_srcl_custom(self): - """test_apt_v3_srcl_custom - Test rendering a custom source template""" - cfg = util.load_yaml(YAML_TEXT_CUSTOM_SL) - mycloud = get_cloud() - - # the second mock restores the original subp - with mock.patch.object(util, 'write_file') as mockwrite: - with mock.patch.object(subp, 'subp', self.subp): - with mock.patch.object(Distro, 'get_primary_arch', - return_value='amd64'): - cc_apt_configure.handle("notimportant", cfg, mycloud, - LOG, None) - - calls = [call('/etc/apt/sources.list', - EXPECTED_CONVERTED_CONTENT, - 
mode=0o644)] - mockwrite.assert_has_calls(calls) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_apt_key.py b/tests/unittests/test_handler/test_handler_apt_key.py deleted file mode 100644 index 00e5a38d..00000000 --- a/tests/unittests/test_handler/test_handler_apt_key.py +++ /dev/null @@ -1,137 +0,0 @@ -import os -from unittest import mock - -from cloudinit.config import cc_apt_configure -from cloudinit import subp -from cloudinit import util - -TEST_KEY_HUMAN = ''' -/etc/apt/cloud-init.gpg.d/my_key.gpg --------------------------------------------- -pub rsa4096 2021-10-22 [SC] - 3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85 -uid [ unknown] Brett Holman -sub rsa4096 2021-10-22 [A] -sub rsa4096 2021-10-22 [E] -''' - -TEST_KEY_MACHINE = ''' -tru::1:1635129362:0:3:1:5 -pub:-:4096:1:F83F77129A5EBD85:1634912922:::-:::scESCA::::::23::0: -fpr:::::::::3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85: -uid:-::::1634912922::64F1F1D6FA96316752D635D7C6406C52C40713C7::Brett Holman \ -::::::::::0: -sub:-:4096:1:544B39C9A9141F04:1634912922::::::a::::::23: -fpr:::::::::8BD901490D6EC986D03D6F0D544B39C9A9141F04: -sub:-:4096:1:F45D9443F0A87092:1634912922::::::e::::::23: -fpr:::::::::8CCCB332317324F030A45B19F45D9443F0A87092: -''' - -TEST_KEY_FINGERPRINT_HUMAN = \ - '3A3E F34D FDED B3B7 F3FD F603 F83F 7712 9A5E BD85' - -TEST_KEY_FINGERPRINT_MACHINE = \ - '3A3EF34DFDEDB3B7F3FDF603F83F77129A5EBD85' - - -class TestAptKey: - """TestAptKey - Class to test apt-key commands - """ - @mock.patch.object(subp, 'subp', return_value=('fakekey', '')) - @mock.patch.object(util, 'write_file') - def _apt_key_add_success_helper(self, directory, *args, hardened=False): - file = cc_apt_configure.apt_key( - 'add', - output_file='my-key', - data='fakekey', - hardened=hardened) - assert file == directory + '/my-key.gpg' - - def test_apt_key_add_success(self): - """Verify the correct directory path gets returned for unhardened case - """ - self._apt_key_add_success_helper('/etc/apt/trusted.gpg.d') - - def test_apt_key_add_success_hardened(self): - """Verify the correct directory path gets returned for hardened case - """ - self._apt_key_add_success_helper( - '/etc/apt/cloud-init.gpg.d', - hardened=True) - - def test_apt_key_add_fail_no_file_name(self): - """Verify that null filename gets handled correctly - """ - file = cc_apt_configure.apt_key( - 'add', - output_file=None, - data='') - assert '/dev/null' == file - - def _apt_key_fail_helper(self): - file = cc_apt_configure.apt_key( - 'add', - output_file='my-key', - data='fakekey') - assert file == '/dev/null' - - @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError) - def test_apt_key_add_fail_no_file_name_subproc(self, *args): - """Verify that bad key value gets handled correctly - """ - self._apt_key_fail_helper() - - @mock.patch.object( - subp, 'subp', side_effect=UnicodeDecodeError('test', b'', 1, 1, '')) - def test_apt_key_add_fail_no_file_name_unicode(self, *args): - """Verify that bad key encoding gets handled correctly - """ - self._apt_key_fail_helper() - - def _apt_key_list_success_helper(self, finger, key, human_output=True): - @mock.patch.object(os, 'listdir', return_value=('/fake/dir/key.gpg',)) - @mock.patch.object(subp, 'subp', return_value=(key, '')) - def mocked_list(*a): - - keys = cc_apt_configure.apt_key('list', human_output) - assert finger in keys - mocked_list() - - def test_apt_key_list_success_human(self): - """Verify expected key output, human - """ - self._apt_key_list_success_helper( - 
TEST_KEY_FINGERPRINT_HUMAN, - TEST_KEY_HUMAN) - - def test_apt_key_list_success_machine(self): - """Verify expected key output, machine - """ - self._apt_key_list_success_helper( - TEST_KEY_FINGERPRINT_MACHINE, - TEST_KEY_MACHINE, human_output=False) - - @mock.patch.object(os, 'listdir', return_value=()) - @mock.patch.object(subp, 'subp', return_value=('', '')) - def test_apt_key_list_fail_no_keys(self, *args): - """Ensure falsy output for no keys - """ - keys = cc_apt_configure.apt_key('list') - assert not keys - - @mock.patch.object(os, 'listdir', return_value=('file_not_gpg_key.txt',)) - @mock.patch.object(subp, 'subp', return_value=('', '')) - def test_apt_key_list_fail_no_keys_file(self, *args): - """Ensure non-gpg file is not returned. - - apt-key used file extensions for this, so we do too - """ - assert not cc_apt_configure.apt_key('list') - - @mock.patch.object(subp, 'subp', side_effect=subp.ProcessExecutionError) - @mock.patch.object(os, 'listdir', return_value=('bad_gpg_key.gpg',)) - def test_apt_key_list_fail_bad_key_file(self, *args): - """Ensure bad gpg key doesn't throw exception. - """ - assert not cc_apt_configure.apt_key('list') diff --git a/tests/unittests/test_handler/test_handler_apt_source_v1.py b/tests/unittests/test_handler/test_handler_apt_source_v1.py deleted file mode 100644 index 2357d699..00000000 --- a/tests/unittests/test_handler/test_handler_apt_source_v1.py +++ /dev/null @@ -1,651 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -""" test_handler_apt_source_v1 -Testing various config variations of the apt_source config -This calls all things with v1 format to stress the conversion code on top of -the actually tested code. -""" -import os -import re -import shutil -import tempfile -import pathlib -from unittest import mock -from unittest.mock import call - -from cloudinit.config import cc_apt_configure -from cloudinit import gpg -from cloudinit import subp -from cloudinit import util - -from cloudinit.tests.helpers import TestCase - -EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ -NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy -8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0 -HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG -CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29 -OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ -FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm -S0ORP6HXET3+jC8BMG4tBWCTK/XEZw== -=ACB2 ------END PGP PUBLIC KEY BLOCK-----""" - -ADD_APT_REPO_MATCH = r"^[\w-]+:\w" - - -class FakeDistro(object): - """Fake Distro helper object""" - def update_package_sources(self): - """Fake update_package_sources helper method""" - return - - -class FakeDatasource: - """Fake Datasource helper object""" - def __init__(self): - self.region = 'region' - - -class FakeCloud(object): - """Fake Cloud helper object""" - def __init__(self): - self.distro = FakeDistro() - self.datasource = FakeDatasource() - - -class TestAptSourceConfig(TestCase): - """TestAptSourceConfig - Main Class to test apt_source configs - """ - release = "fantastic" - - def setUp(self): - super(TestAptSourceConfig, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - self.aptlistfile = os.path.join(self.tmp, "single-deb.list") - self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list") - self.aptlistfile3 = 
os.path.join(self.tmp, "single-deb3.list") - self.join = os.path.join - self.matcher = re.compile(ADD_APT_REPO_MATCH).search - # mock fallback filename into writable tmp dir - self.fallbackfn = os.path.join(self.tmp, "etc/apt/sources.list.d/", - "cloud_config_sources.list") - - self.fakecloud = FakeCloud() - - rpatcher = mock.patch("cloudinit.util.lsb_release") - get_rel = rpatcher.start() - get_rel.return_value = {'codename': self.release} - self.addCleanup(rpatcher.stop) - apatcher = mock.patch("cloudinit.util.get_dpkg_architecture") - get_arch = apatcher.start() - get_arch.return_value = 'amd64' - self.addCleanup(apatcher.stop) - - def _get_default_params(self): - """get_default_params - Get the most basic default mirror and release info to be used in tests - """ - params = {} - params['RELEASE'] = self.release - params['MIRROR'] = "http://archive.ubuntu.com/ubuntu" - return params - - def wrapv1conf(self, cfg): - params = self._get_default_params() - # old v1 list format under old keys, but callable to main handler - # disable source.list rendering and set mirror to avoid other code - return {'apt_preserve_sources_list': True, - 'apt_mirror': params['MIRROR'], - 'apt_sources': cfg} - - def myjoin(self, *args, **kwargs): - """myjoin - redir into writable tmpdir""" - if (args[0] == "/etc/apt/sources.list.d/" and - args[1] == "cloud_config_sources.list" and - len(args) == 2): - return self.join(self.tmp, args[0].lstrip("/"), args[1]) - else: - return self.join(*args, **kwargs) - - def apt_src_basic(self, filename, cfg): - """apt_src_basic - Test Fix deb source string, has to overwrite mirror conf in params - """ - cfg = self.wrapv1conf(cfg) - - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://archive.ubuntu.com/ubuntu", - "karmic-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_basic(self): - """Test deb source string, overwrite mirror and filename""" - cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile} - self.apt_src_basic(self.aptlistfile, [cfg]) - - def test_apt_src_basic_dict(self): - """Test deb source string, overwrite mirror and filename (dict)""" - cfg = {self.aptlistfile: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}} - self.apt_src_basic(self.aptlistfile, cfg) - - def apt_src_basic_tri(self, cfg): - """apt_src_basic_tri - Test Fix three deb source string, has to overwrite mirror conf in - params. Test with filenames provided in config. 
- generic part to check three files with different content - """ - self.apt_src_basic(self.aptlistfile, cfg) - - # extra verify on two extra files of this test - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://archive.ubuntu.com/ubuntu", - "precise-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://archive.ubuntu.com/ubuntu", - "lucid-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_basic_tri(self): - """Test Fix three deb source string with filenames""" - cfg1 = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile} - cfg2 = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' precise-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile2} - cfg3 = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' lucid-backports' - ' main universe multiverse restricted'), - 'filename': self.aptlistfile3} - self.apt_src_basic_tri([cfg1, cfg2, cfg3]) - - def test_apt_src_basic_dict_tri(self): - """Test Fix three deb source string with filenames (dict)""" - cfg = {self.aptlistfile: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}, - self.aptlistfile2: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' precise-backports' - ' main universe multiverse restricted')}, - self.aptlistfile3: {'source': - ('deb http://archive.ubuntu.com/ubuntu' - ' lucid-backports' - ' main universe multiverse restricted')}} - self.apt_src_basic_tri(cfg) - - def test_apt_src_basic_nofn(self): - """Test Fix three deb source string without filenames (dict)""" - cfg = {'source': ('deb http://archive.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): - self.apt_src_basic(self.fallbackfn, [cfg]) - - def apt_src_replacement(self, filename, cfg): - """apt_src_replace - Test Autoreplacement of MIRROR and RELEASE in source specs - """ - cfg = self.wrapv1conf(cfg) - params = self._get_default_params() - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "multiverse"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_replace(self): - """Test Autoreplacement of MIRROR and RELEASE in source specs""" - cfg = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - self.apt_src_replacement(self.aptlistfile, [cfg]) - - def apt_src_replace_tri(self, cfg): - """apt_src_replace_tri - Test three autoreplacements of MIRROR and RELEASE in source specs with - generic part - """ - self.apt_src_replacement(self.aptlistfile, cfg) - - # extra verify on two extra files of this test - params = self._get_default_params() - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "main"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "universe"), - contents, 
flags=re.IGNORECASE)) - - def test_apt_src_replace_tri(self): - """Test triple Autoreplacement of MIRROR and RELEASE in source specs""" - cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - cfg2 = {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'deb $MIRROR $RELEASE universe', - 'filename': self.aptlistfile3} - self.apt_src_replace_tri([cfg1, cfg2, cfg3]) - - def test_apt_src_replace_dict_tri(self): - """Test triple Autoreplacement in source specs (dict)""" - cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}, - 'notused': {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2}, - self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}} - self.apt_src_replace_tri(cfg) - - def test_apt_src_replace_nofn(self): - """Test Autoreplacement of MIRROR and RELEASE in source specs nofile""" - cfg = {'source': 'deb $MIRROR $RELEASE multiverse'} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): - self.apt_src_replacement(self.fallbackfn, [cfg]) - - def apt_src_keyid(self, filename, cfg, keynum): - """apt_src_keyid - Test specification of a source + keyid - """ - cfg = self.wrapv1conf(cfg) - - with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - # check if it added the right number of keys - calls = [] - sources = cfg['apt']['sources'] - for src in sources: - print(sources[src]) - calls.append(call(sources[src], None)) - - mockobj.assert_has_calls(calls, any_order=True) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_keyid(self): - """Test specification of a source + keyid with filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77", - 'filename': self.aptlistfile} - self.apt_src_keyid(self.aptlistfile, [cfg], 1) - - def test_apt_src_keyid_tri(self): - """Test 3x specification of a source + keyid with filename being set""" - cfg1 = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77", - 'filename': self.aptlistfile} - cfg2 = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial universe'), - 'keyid': "03683F77", - 'filename': self.aptlistfile2} - cfg3 = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial multiverse'), - 'keyid': "03683F77", - 'filename': self.aptlistfile3} - - self.apt_src_keyid(self.aptlistfile, [cfg1, cfg2, cfg3], 3) - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "universe"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "multiverse"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_keyid_nofn(self): - """Test specification of a source + keyid without filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial 
main'), - 'keyid': "03683F77"} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): - self.apt_src_keyid(self.fallbackfn, [cfg], 1) - - def apt_src_key(self, filename, cfg): - """apt_src_key - Test specification of a source + key - """ - cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - # check if it added the right amount of keys - sources = cfg['apt']['sources'] - calls = [] - for src in sources: - print(sources[src]) - calls.append(call(sources[src], None)) - - mockobj.assert_has_calls(calls, any_order=True) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) - - def test_apt_src_key(self): - """Test specification of a source + key with filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'key': "fakekey 4321", - 'filename': self.aptlistfile} - self.apt_src_key(self.aptlistfile, cfg) - - def test_apt_src_key_nofn(self): - """Test specification of a source + key without filename being set""" - cfg = {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'key': "fakekey 4321"} - with mock.patch.object(os.path, 'join', side_effect=self.myjoin): - self.apt_src_key(self.fallbackfn, cfg) - - def test_apt_src_keyonly(self): - """Test specifying key without source""" - cfg = {'key': "fakekey 4242", - 'filename': self.aptlistfile} - cfg = self.wrapv1conf([cfg]) - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 4242', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_src_keyidonly(self): - """Test specification of a keyid without source""" - cfg = {'keyid': "03683F77", - 'filename': self.aptlistfile} - cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')): - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - cc_apt_configure.handle( - "test", - cfg, - self.fakecloud, - None, - None) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 1212', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): - """apt_src_keyid_real - Test specification of a keyid without source including - up to addition of the key (add_apt_key_raw mocked to keep the - environment as is) - """ - key = cfg['keyid'] - keyserver = cfg.get('keyserver', 'keyserver.ubuntu.com') - cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey: - with mock.patch.object(gpg, 'getkeybyid', - return_value=expectedkey) as mockgetkey: - cc_apt_configure.handle("test", cfg, self.fakecloud, - None, None) - if is_hardened is not None: - mockkey.assert_called_with( - expectedkey, - self.aptlistfile, - hardened=is_hardened) - else: - 
mockkey.assert_called_with(expectedkey, self.aptlistfile) - mockgetkey.assert_called_with(key, keyserver) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_src_keyid_real(self): - """test_apt_src_keyid_real - Test keyid including key add""" - keyid = "03683F77" - cfg = {'keyid': keyid, - 'filename': self.aptlistfile} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_src_longkeyid_real(self): - """test_apt_src_longkeyid_real - Test long keyid including key add""" - keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {'keyid': keyid, - 'filename': self.aptlistfile} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_src_longkeyid_ks_real(self): - """test_apt_src_longkeyid_ks_real - Test long keyid from other ks""" - keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {'keyid': keyid, - 'keyserver': 'keys.gnupg.net', - 'filename': self.aptlistfile} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_src_ppa(self): - """Test adding a ppa""" - cfg = {'source': 'ppa:smoser/cloud-init-test', - 'filename': self.aptlistfile} - cfg = self.wrapv1conf([cfg]) - - with mock.patch.object(subp, 'subp') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, None, None) - mockobj.assert_called_once_with(['add-apt-repository', - 'ppa:smoser/cloud-init-test'], - target=None) - - # adding ppa should ignore filename (uses add-apt-repository) - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_src_ppa_tri(self): - """Test adding three ppa's""" - cfg1 = {'source': 'ppa:smoser/cloud-init-test', - 'filename': self.aptlistfile} - cfg2 = {'source': 'ppa:smoser/cloud-init-test2', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'ppa:smoser/cloud-init-test3', - 'filename': self.aptlistfile3} - cfg = self.wrapv1conf([cfg1, cfg2, cfg3]) - - with mock.patch.object(subp, 'subp') as mockobj: - cc_apt_configure.handle("test", cfg, self.fakecloud, - None, None) - calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'], - target=None), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'], - target=None), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'], - target=None)] - mockobj.assert_has_calls(calls, any_order=True) - - # adding ppa should ignore all filenames (uses add-apt-repository) - self.assertFalse(os.path.isfile(self.aptlistfile)) - self.assertFalse(os.path.isfile(self.aptlistfile2)) - self.assertFalse(os.path.isfile(self.aptlistfile3)) - - def test_convert_to_new_format(self): - """Test the conversion of old to new format""" - cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - cfg2 = {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'deb $MIRROR $RELEASE universe', - 'filename': self.aptlistfile3} - cfg = {'apt_sources': [cfg1, cfg2, cfg3]} - checkcfg = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'multiverse'}, - self.aptlistfile2: {'filename': self.aptlistfile2, - 'source': 'deb $MIRROR $RELEASE main'}, - self.aptlistfile3: {'filename': self.aptlistfile3, - 'source': 'deb $MIRROR $RELEASE ' - 'universe'}} - - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg) - self.assertEqual(newcfg['apt']['sources'], checkcfg) - - # convert again, should stay the same - newcfg2 = cc_apt_configure.convert_to_v3_apt_format(newcfg) - 
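# Illustrative sketch (simplified, assumed values): the PPA tests above
# depend on 'ppa:' specs matching ADD_APT_REPO_MATCH, in which case the
# handler shells out to add-apt-repository instead of writing a
# sources.list.d file.
import re

def _is_repo_spec_example(source):
    """True for specs such as 'ppa:smoser/cloud-init-test'."""
    return re.search(r"^[\w-]+:\w", source) is not None

assert _is_repo_spec_example('ppa:smoser/cloud-init-test')
assert not _is_repo_spec_example(
    'deb http://archive.ubuntu.com/ubuntu/ xenial main')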
self.assertEqual(newcfg2['apt']['sources'], checkcfg) - - # should work without raising an exception - cc_apt_configure.convert_to_v3_apt_format({}) - - with self.assertRaises(ValueError): - cc_apt_configure.convert_to_v3_apt_format({'apt_sources': 5}) - - def test_convert_to_new_format_collision(self): - """Test the conversion of old to new format with collisions - That matches e.g. the MAAS case specifying old and new config""" - cfg_1_and_3 = {'apt': {'proxy': 'http://192.168.122.1:8000/'}, - 'apt_proxy': 'http://192.168.122.1:8000/'} - cfg_3_only = {'apt': {'proxy': 'http://192.168.122.1:8000/'}} - cfgconflict = {'apt': {'proxy': 'http://192.168.122.1:8000/'}, - 'apt_proxy': 'ftp://192.168.122.1:8000/'} - - # collision (equal) - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3) - self.assertEqual(newcfg, cfg_3_only) - # collision (equal, so ok to remove) - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only) - self.assertEqual(newcfg, cfg_3_only) - # collision (unequal) - match = "Old and New.*unequal.*apt_proxy" - with self.assertRaisesRegex(ValueError, match): - cc_apt_configure.convert_to_v3_apt_format(cfgconflict) - - def test_convert_to_new_format_dict_collision(self): - cfg1 = {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile} - cfg2 = {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2} - cfg3 = {'source': 'deb $MIRROR $RELEASE universe', - 'filename': self.aptlistfile3} - fullv3 = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'multiverse'}, - self.aptlistfile2: {'filename': self.aptlistfile2, - 'source': 'deb $MIRROR $RELEASE main'}, - self.aptlistfile3: {'filename': self.aptlistfile3, - 'source': 'deb $MIRROR $RELEASE ' - 'universe'}} - cfg_3_only = {'apt': {'sources': fullv3}} - cfg_1_and_3 = {'apt_sources': [cfg1, cfg2, cfg3]} - cfg_1_and_3.update(cfg_3_only) - - # collision (equal, so ok to remove) - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3) - self.assertEqual(newcfg, cfg_3_only) - # no old spec (same result) - newcfg = cc_apt_configure.convert_to_v3_apt_format(cfg_3_only) - self.assertEqual(newcfg, cfg_3_only) - - diff = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'DIFFERENTVERSE'}, - self.aptlistfile2: {'filename': self.aptlistfile2, - 'source': 'deb $MIRROR $RELEASE main'}, - self.aptlistfile3: {'filename': self.aptlistfile3, - 'source': 'deb $MIRROR $RELEASE ' - 'universe'}} - cfg_3_only = {'apt': {'sources': diff}} - cfg_1_and_3_different = {'apt_sources': [cfg1, cfg2, cfg3]} - cfg_1_and_3_different.update(cfg_3_only) - - # collision (unequal by dict having a different entry) - with self.assertRaises(ValueError): - cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_different) - - missing = {self.aptlistfile: {'filename': self.aptlistfile, - 'source': 'deb $MIRROR $RELEASE ' - 'multiverse'}} - cfg_3_only = {'apt': {'sources': missing}} - cfg_1_and_3_missing = {'apt_sources': [cfg1, cfg2, cfg3]} - cfg_1_and_3_missing.update(cfg_3_only) - # collision (unequal by dict missing an entry) - with self.assertRaises(ValueError): - cc_apt_configure.convert_to_v3_apt_format(cfg_1_and_3_missing) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py deleted file mode 100644 index 20289121..00000000 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ /dev/null @@ -1,1170 +0,0 @@ -# 
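# Illustrative sketch of the v1 -> v3 shape change exercised by the
# conversion tests above (file name assumed for the example): a list of
# entries keyed by 'filename' becomes a dict keyed by that filename.
v1_example = {'apt_sources': [
    {'source': 'deb $MIRROR $RELEASE multiverse',
     'filename': 'single-deb.list'}]}
v3_example = {'apt': {'sources': {
    'single-deb.list': {'source': 'deb $MIRROR $RELEASE multiverse',
                        'filename': 'single-deb.list'}}}}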
This file is part of cloud-init. See LICENSE file for license information.
-
-"""test_handler_apt_source_v3
-Testing various config variations of the apt_source custom config.
-This tries to call all of them in the new v3 format and covers the new
-features.
-"""
-import glob
-import os
-import re
-import shutil
-import socket
-import tempfile
-import pathlib
-
-from unittest import TestCase, mock
-from unittest.mock import call
-
-from cloudinit import gpg
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.config import cc_apt_configure
-from cloudinit.tests import helpers as t_help
-
-from tests.unittests.util import get_cloud
-
-EXPECTEDKEY = """-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0ESuZLUgEEAKkqq3idtFP7g9hzOu1a8+v8ImawQN4TrvlygfScMU1TIS1eC7UQ
-NUA8Qqgr9iUaGnejb0VciqftLrU9D6WYHSKz+EITefgdyJ6SoQxjoJdsCpJ7o9Jy
-8PQnpRttiFm4qHu6BVnKnBNxw/z3ST9YMqW5kbMQpfxbGe+obRox59NpABEBAAG0
-HUxhdW5jaHBhZCBQUEEgZm9yIFNjb3R0IE1vc2VyiLYEEwECACAFAkrmS1ICGwMG
-CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRAGILvPA2g/d3aEA/9tVjc10HOZwV29
-OatVuTeERjjrIbxflO586GLA8cp0C9RQCwgod/R+cKYdQcHjbqVcP0HqxveLg0RZ
-FJpWLmWKamwkABErwQLGlM/Hwhjfade8VvEQutH5/0JgKHmzRsoqfR+LMO6OS+Sm
-S0ORP6HXET3+jC8BMG4tBWCTK/XEZw==
-=ACB2
------END PGP PUBLIC KEY BLOCK-----"""

-
-ADD_APT_REPO_MATCH = r"^[\w-]+:\w"
-
-TARGET = None
-
-MOCK_LSB_RELEASE_DATA = {
-    'id': 'Ubuntu', 'description': 'Ubuntu 18.04.1 LTS',
-    'release': '18.04', 'codename': 'bionic'}
-
-
-class FakeDatasource:
-    """Fake Datasource helper object"""
-    def __init__(self):
-        self.region = 'region'
-
-
-class FakeCloud:
-    """Fake Cloud helper object"""
-    def __init__(self):
-        self.datasource = FakeDatasource()
-
-
-class TestAptSourceConfig(t_help.FilesystemMockingTestCase):
-    """TestAptSourceConfig
-    Main Class to test apt configs
-    """
-    def setUp(self):
-        super(TestAptSourceConfig, self).setUp()
-        self.tmp = tempfile.mkdtemp()
-        self.new_root = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.tmp)
-        self.addCleanup(shutil.rmtree, self.new_root)
-        self.aptlistfile = os.path.join(self.tmp, "single-deb.list")
-        self.aptlistfile2 = os.path.join(self.tmp, "single-deb2.list")
-        self.aptlistfile3 = os.path.join(self.tmp, "single-deb3.list")
-        self.join = os.path.join
-        self.matcher = re.compile(ADD_APT_REPO_MATCH).search
-        self.add_patch(
-            'cloudinit.config.cc_apt_configure.util.lsb_release',
-            'm_lsb_release', return_value=MOCK_LSB_RELEASE_DATA.copy())
-
-    @staticmethod
-    def _add_apt_sources(*args, **kwargs):
-        with mock.patch.object(cc_apt_configure, 'update_packages'):
-            cc_apt_configure.add_apt_sources(*args, **kwargs)
-
-    @staticmethod
-    def _get_default_params():
-        """get_default_params
-        Get the most basic default mirror and release info to be used in tests
-        """
-        params = {}
-        params['RELEASE'] = MOCK_LSB_RELEASE_DATA['release']
-        arch = 'amd64'
-        params['MIRROR'] = cc_apt_configure.\
-            get_default_mirrors(arch)["PRIMARY"]
-        return params
-
-    def _myjoin(self, *args, **kwargs):
-        """_myjoin - redir into writable tmpdir"""
-        if (args[0] == "/etc/apt/sources.list.d/" and
-                args[1] == "cloud_config_sources.list" and
-                len(args) == 2):
-            return self.join(self.tmp, args[0].lstrip("/"), args[1])
-        else:
-            return self.join(*args, **kwargs)
-
-    def _apt_src_basic(self, filename, cfg):
-        """_apt_src_basic
-        Test a fixed deb source string; it has to overwrite the mirror
-        conf in params.
-        """
-        params = self._get_default_params()
-
-        self._add_apt_sources(cfg, TARGET, template_params=params,
-                              aa_repo_match=self.matcher)
-
-
self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://test.ubuntu.com/ubuntu", - "karmic-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_basic(self): - """test_apt_v3_src_basic - Test fix deb source string""" - cfg = {self.aptlistfile: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}} - self._apt_src_basic(self.aptlistfile, cfg) - - def test_apt_v3_src_basic_tri(self): - """test_apt_v3_src_basic_tri - Test multiple fix deb source strings""" - cfg = {self.aptlistfile: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' karmic-backports' - ' main universe multiverse restricted')}, - self.aptlistfile2: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' precise-backports' - ' main universe multiverse restricted')}, - self.aptlistfile3: {'source': - ('deb http://test.ubuntu.com/ubuntu' - ' lucid-backports' - ' main universe multiverse restricted')}} - self._apt_src_basic(self.aptlistfile, cfg) - - # extra verify on two extra files of this test - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://test.ubuntu.com/ubuntu", - "precise-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", "http://test.ubuntu.com/ubuntu", - "lucid-backports", - "main universe multiverse restricted"), - contents, flags=re.IGNORECASE)) - - def _apt_src_replacement(self, filename, cfg): - """apt_src_replace - Test Autoreplacement of MIRROR and RELEASE in source specs - """ - params = self._get_default_params() - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "multiverse"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_replace(self): - """test_apt_v3_src_replace - Test replacement of MIRROR & RELEASE""" - cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE multiverse'}} - self._apt_src_replacement(self.aptlistfile, cfg) - - def test_apt_v3_src_replace_fn(self): - """test_apt_v3_src_replace_fn - Test filename overwritten in dict""" - cfg = {'ignored': {'source': 'deb $MIRROR $RELEASE multiverse', - 'filename': self.aptlistfile}} - # second file should overwrite the dict key - self._apt_src_replacement(self.aptlistfile, cfg) - - def _apt_src_replace_tri(self, cfg): - """_apt_src_replace_tri - Test three autoreplacements of MIRROR and RELEASE in source specs with - generic part - """ - self._apt_src_replacement(self.aptlistfile, cfg) - - # extra verify on two extra files of this test - params = self._get_default_params() - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "main"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", params['MIRROR'], params['RELEASE'], - "universe"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_replace_tri(self): - """test_apt_v3_src_replace_tri - Test multiple replace/overwrites""" - cfg = {self.aptlistfile: {'source': 'deb $MIRROR $RELEASE 
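# Sketch of the v3 sources mapping these tests feed to add_apt_sources
# (file names assumed): the dict key names the target file, and an
# explicit 'filename' entry overrides the key, as the replace_fn test
# above demonstrates.
v3_sources_example = {
    'single-deb.list': {
        'source': 'deb $MIRROR $RELEASE multiverse'},
    'ignored-key': {
        'source': 'deb $MIRROR $RELEASE main',
        'filename': 'single-deb2.list'},
}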
multiverse'}, - 'notused': {'source': 'deb $MIRROR $RELEASE main', - 'filename': self.aptlistfile2}, - self.aptlistfile3: {'source': 'deb $MIRROR $RELEASE universe'}} - self._apt_src_replace_tri(cfg) - - def _apt_src_keyid(self, filename, cfg, keynum, is_hardened=None): - """_apt_src_keyid - Test specification of a source + keyid - """ - params = self._get_default_params() - - with mock.patch.object(cc_apt_configure, 'add_apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - # check if it added the right number of keys - calls = [] - for key in cfg: - if is_hardened is not None: - calls.append(call(cfg[key], hardened=is_hardened)) - else: - calls.append(call(cfg[key], TARGET)) - - mockobj.assert_has_calls(calls, any_order=True) - - self.assertTrue(os.path.isfile(filename)) - - contents = util.load_file(filename) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_keyid(self): - """test_apt_v3_src_keyid - Test source + keyid with filename""" - cfg = {self.aptlistfile: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'filename': self.aptlistfile, - 'keyid': "03683F77"}} - self._apt_src_keyid(self.aptlistfile, cfg, 1) - - def test_apt_v3_src_keyid_tri(self): - """test_apt_v3_src_keyid_tri - Test multiple src+key+filen writes""" - cfg = {self.aptlistfile: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'keyid': "03683F77"}, - 'ignored': {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial universe'), - 'keyid': "03683F77", - 'filename': self.aptlistfile2}, - self.aptlistfile3: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial multiverse'), - 'filename': self.aptlistfile3, - 'keyid': "03683F77"}} - - self._apt_src_keyid(self.aptlistfile, cfg, 3) - contents = util.load_file(self.aptlistfile2) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "universe"), - contents, flags=re.IGNORECASE)) - contents = util.load_file(self.aptlistfile3) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "multiverse"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_key(self): - """test_apt_v3_src_key - Test source + key""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'source': ('deb ' - 'http://ppa.launchpad.net/' - 'smoser/cloud-init-test/ubuntu' - ' xenial main'), - 'filename': self.aptlistfile, - 'key': "fakekey 4321"}} - - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 4321', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - self.assertTrue(os.path.isfile(self.aptlistfile)) - - contents = util.load_file(self.aptlistfile) - self.assertTrue(re.search(r"%s %s %s %s\n" % - ("deb", - ('http://ppa.launchpad.net/smoser/' - 'cloud-init-test/ubuntu'), - "xenial", "main"), - contents, flags=re.IGNORECASE)) - - def test_apt_v3_src_keyonly(self): - """test_apt_v3_src_keyonly - Test key without 
source""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'key': "fakekey 4242"}} - - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 4242', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_v3_src_keyidonly(self): - """test_apt_v3_src_keyidonly - Test keyid without source""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'keyid': "03683F77"}} - with mock.patch.object(subp, 'subp', - return_value=('fakekey 1212', '')): - with mock.patch.object(cc_apt_configure, 'apt_key') as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - calls = (call( - 'add', - output_file=pathlib.Path(self.aptlistfile).stem, - data='fakekey 1212', - hardened=False),) - mockobj.assert_has_calls(calls, any_order=True) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def apt_src_keyid_real(self, cfg, expectedkey, is_hardened=None): - """apt_src_keyid_real - Test specification of a keyid without source including - up to addition of the key (add_apt_key_raw mocked to keep the - environment as is) - """ - params = self._get_default_params() - - with mock.patch.object(cc_apt_configure, 'add_apt_key_raw') as mockkey: - with mock.patch.object(gpg, 'getkeybyid', - return_value=expectedkey) as mockgetkey: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - keycfg = cfg[self.aptlistfile] - mockgetkey.assert_called_with(keycfg['keyid'], - keycfg.get('keyserver', - 'keyserver.ubuntu.com')) - if is_hardened is not None: - mockkey.assert_called_with( - expectedkey, - keycfg['keyfile'], - hardened=is_hardened) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_v3_src_keyid_real(self): - """test_apt_v3_src_keyid_real - Test keyid including key add""" - keyid = "03683F77" - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile}} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_v3_src_longkeyid_real(self): - """test_apt_v3_src_longkeyid_real Test long keyid including key add""" - keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile}} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY, is_hardened=False) - - def test_apt_v3_src_longkeyid_ks_real(self): - """test_apt_v3_src_longkeyid_ks_real Test long keyid from other ks""" - keyid = "B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77" - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile, - 'keyserver': 'keys.gnupg.net'}} - - self.apt_src_keyid_real(cfg, EXPECTEDKEY) - - def test_apt_v3_src_keyid_keyserver(self): - """test_apt_v3_src_keyid_keyserver - Test custom keyserver""" - keyid = "03683F77" - params = self._get_default_params() - cfg = {self.aptlistfile: {'keyid': keyid, - 'keyfile': self.aptlistfile, - 'keyserver': 'test.random.com'}} - - # in some test environments only *.ubuntu.com is reachable - # so mock the call and check if the config got there - with mock.patch.object(gpg, 'getkeybyid', - return_value="fakekey") as mockgetkey: - with 
mock.patch.object(cc_apt_configure, - 'add_apt_key_raw') as mockadd: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - - mockgetkey.assert_called_with('03683F77', 'test.random.com') - mockadd.assert_called_with('fakekey', self.aptlistfile, hardened=False) - - # filename should be ignored on key only - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_v3_src_ppa(self): - """test_apt_v3_src_ppa - Test specification of a ppa""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}} - - with mock.patch("cloudinit.subp.subp") as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - mockobj.assert_any_call(['add-apt-repository', - 'ppa:smoser/cloud-init-test'], target=TARGET) - - # adding ppa should ignore filename (uses add-apt-repository) - self.assertFalse(os.path.isfile(self.aptlistfile)) - - def test_apt_v3_src_ppa_tri(self): - """test_apt_v3_src_ppa_tri - Test specification of multiple ppa's""" - params = self._get_default_params() - cfg = {self.aptlistfile: {'source': 'ppa:smoser/cloud-init-test'}, - self.aptlistfile2: {'source': 'ppa:smoser/cloud-init-test2'}, - self.aptlistfile3: {'source': 'ppa:smoser/cloud-init-test3'}} - - with mock.patch("cloudinit.subp.subp") as mockobj: - self._add_apt_sources(cfg, TARGET, template_params=params, - aa_repo_match=self.matcher) - calls = [call(['add-apt-repository', 'ppa:smoser/cloud-init-test'], - target=TARGET), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test2'], - target=TARGET), - call(['add-apt-repository', 'ppa:smoser/cloud-init-test3'], - target=TARGET)] - mockobj.assert_has_calls(calls, any_order=True) - - # adding ppa should ignore all filenames (uses add-apt-repository) - self.assertFalse(os.path.isfile(self.aptlistfile)) - self.assertFalse(os.path.isfile(self.aptlistfile2)) - self.assertFalse(os.path.isfile(self.aptlistfile3)) - - @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") - def test_apt_v3_list_rename(self, m_get_dpkg_architecture): - """test_apt_v3_list_rename - Test find mirror and apt list renaming""" - pre = "/var/lib/apt/lists" - # filenames are archive dependent - - arch = 's390x' - m_get_dpkg_architecture.return_value = arch - component = "ubuntu-ports" - archive = "ports.ubuntu.com" - - cfg = {'primary': [{'arches': ["default"], - 'uri': - 'http://test.ubuntu.com/%s/' % component}], - 'security': [{'arches': ["default"], - 'uri': - 'http://testsec.ubuntu.com/%s/' % component}]} - post = ("%s_dists_%s-updates_InRelease" % - (component, MOCK_LSB_RELEASE_DATA['codename'])) - fromfn = ("%s/%s_%s" % (pre, archive, post)) - tofn = ("%s/test.ubuntu.com_%s" % (pre, post)) - - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch) - - self.assertEqual(mirrors['MIRROR'], - "http://test.ubuntu.com/%s/" % component) - self.assertEqual(mirrors['PRIMARY'], - "http://test.ubuntu.com/%s/" % component) - self.assertEqual(mirrors['SECURITY'], - "http://testsec.ubuntu.com/%s/" % component) - - with mock.patch.object(os, 'rename') as mockren: - with mock.patch.object(glob, 'glob', - return_value=[fromfn]): - cc_apt_configure.rename_apt_lists(mirrors, TARGET, arch) - - mockren.assert_any_call(fromfn, tofn) - - @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture") - def test_apt_v3_list_rename_non_slash(self, m_get_dpkg_architecture): - target = os.path.join(self.tmp, "rename_non_slash") - apt_lists_d = 
os.path.join(target, "./" + cc_apt_configure.APT_LISTS) - - arch = 'amd64' - m_get_dpkg_architecture.return_value = arch - - mirror_path = "some/random/path/" - primary = "http://test.ubuntu.com/" + mirror_path - security = "http://test-security.ubuntu.com/" + mirror_path - mirrors = {'PRIMARY': primary, 'SECURITY': security} - - # these match default archive prefixes - opri_pre = "archive.ubuntu.com_ubuntu_dists_xenial" - osec_pre = "security.ubuntu.com_ubuntu_dists_xenial" - # this one won't match and should not be renamed defaults. - other_pre = "dl.google.com_linux_chrome_deb_dists_stable" - # these are our new expected prefixes - npri_pre = "test.ubuntu.com_some_random_path_dists_xenial" - nsec_pre = "test-security.ubuntu.com_some_random_path_dists_xenial" - - files = [ - # orig prefix, new prefix, suffix - (opri_pre, npri_pre, "_main_binary-amd64_Packages"), - (opri_pre, npri_pre, "_main_binary-amd64_InRelease"), - (opri_pre, npri_pre, "-updates_main_binary-amd64_Packages"), - (opri_pre, npri_pre, "-updates_main_binary-amd64_InRelease"), - (other_pre, other_pre, "_main_binary-amd64_Packages"), - (other_pre, other_pre, "_Release"), - (other_pre, other_pre, "_Release.gpg"), - (osec_pre, nsec_pre, "_InRelease"), - (osec_pre, nsec_pre, "_main_binary-amd64_Packages"), - (osec_pre, nsec_pre, "_universe_binary-amd64_Packages"), - ] - - expected = sorted([npre + suff for opre, npre, suff in files]) - # create files - for (opre, _npre, suff) in files: - fpath = os.path.join(apt_lists_d, opre + suff) - util.write_file(fpath, content=fpath) - - cc_apt_configure.rename_apt_lists(mirrors, target, arch) - found = sorted(os.listdir(apt_lists_d)) - self.assertEqual(expected, found) - - @staticmethod - def test_apt_v3_proxy(): - """test_apt_v3_proxy - Test apt_*proxy configuration""" - cfg = {"proxy": "foobar1", - "http_proxy": "foobar2", - "ftp_proxy": "foobar3", - "https_proxy": "foobar4"} - - with mock.patch.object(util, 'write_file') as mockobj: - cc_apt_configure.apply_apt_config(cfg, "proxyfn", "notused") - - mockobj.assert_called_with('proxyfn', - ('Acquire::http::Proxy "foobar1";\n' - 'Acquire::http::Proxy "foobar2";\n' - 'Acquire::ftp::Proxy "foobar3";\n' - 'Acquire::https::Proxy "foobar4";\n')) - - def test_apt_v3_mirror(self): - """test_apt_v3_mirror - Test defining a mirror""" - pmir = "http://us.archive.ubuntu.com/ubuntu/" - smir = "http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "uri": pmir}], - "security": [{'arches': ["default"], - "uri": smir}]} - - mirrors = cc_apt_configure.find_apt_mirror_info( - cfg, FakeCloud(), 'amd64') - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - def test_apt_v3_mirror_default(self): - """test_apt_v3_mirror_default - Test without defining a mirror""" - arch = 'amd64' - default_mirrors = cc_apt_configure.get_default_mirrors(arch) - pmir = default_mirrors["PRIMARY"] - smir = default_mirrors["SECURITY"] - mycloud = get_cloud() - mirrors = cc_apt_configure.find_apt_mirror_info({}, mycloud, arch) - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - def test_apt_v3_mirror_arches(self): - """test_apt_v3_mirror_arches - Test arches selection of mirror""" - pmir = "http://my-primary.ubuntu.com/ubuntu/" - smir = "http://my-security.ubuntu.com/ubuntu/" - arch = 'ppc64el' - cfg = {"primary": [{'arches': ["default"], "uri": "notthis-primary"}, - 
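# Minimal sketch matching the rendering asserted by test_apt_v3_proxy
# above (not the module's real implementation): each apt proxy option
# becomes one Acquire::<scheme>::Proxy line, with the plain 'proxy' key
# acting as an http proxy alias.
def _render_apt_proxy_example(cfg):
    mapping = (("proxy", "http"), ("http_proxy", "http"),
               ("ftp_proxy", "ftp"), ("https_proxy", "https"))
    return "".join('Acquire::%s::Proxy "%s";\n' % (scheme, cfg[key])
                   for key, scheme in mapping if key in cfg)

assert _render_apt_proxy_example(
    {"proxy": "p1", "ftp_proxy": "p3"}) == (
        'Acquire::http::Proxy "p1";\nAcquire::ftp::Proxy "p3";\n')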
                           {'arches': [arch], "uri": pmir}],
-               "security": [{'arches': ["default"],
-                             "uri": "notthis-security"},
-                            {'arches': [arch], "uri": smir}]}
-
-        mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(), arch)
-
-        self.assertEqual(mirrors['PRIMARY'], pmir)
-        self.assertEqual(mirrors['MIRROR'], pmir)
-        self.assertEqual(mirrors['SECURITY'], smir)
-
-    def test_apt_v3_mirror_arches_default(self):
-        """test_apt_v3_mirror_arches_default - Test falling back to default arch"""
-        pmir = "http://us.archive.ubuntu.com/ubuntu/"
-        smir = "http://security.ubuntu.com/ubuntu/"
-        cfg = {"primary": [{'arches': ["default"],
-                            "uri": pmir},
-                           {'arches': ["thisarchdoesntexist"],
-                            "uri": "notthis"}],
-               "security": [{'arches': ["thisarchdoesntexist"],
-                             "uri": "nothat"},
-                            {'arches': ["default"],
-                             "uri": smir}]}
-
-        mirrors = cc_apt_configure.find_apt_mirror_info(
-            cfg, FakeCloud(), 'amd64')
-
-        self.assertEqual(mirrors['MIRROR'],
-                         pmir)
-        self.assertEqual(mirrors['PRIMARY'],
-                         pmir)
-        self.assertEqual(mirrors['SECURITY'],
-                         smir)
-
-    @mock.patch("cloudinit.config.cc_apt_configure.util.get_dpkg_architecture")
-    def test_apt_v3_get_def_mir_non_intel_no_arch(
-        self, m_get_dpkg_architecture
-    ):
-        arch = 'ppc64el'
-        m_get_dpkg_architecture.return_value = arch
-        expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
-                    'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
-        self.assertEqual(expected, cc_apt_configure.get_default_mirrors())
-
-    def test_apt_v3_get_default_mirrors_non_intel_with_arch(self):
-        found = cc_apt_configure.get_default_mirrors('ppc64el')
-
-        expected = {'PRIMARY': 'http://ports.ubuntu.com/ubuntu-ports',
-                    'SECURITY': 'http://ports.ubuntu.com/ubuntu-ports'}
-        self.assertEqual(expected, found)
-
-    def test_apt_v3_mirror_arches_sysdefault(self):
-        """test_apt_v3_mirror_arches_sysdefault - Test arches fallback to sys default"""
-        arch = 'amd64'
-        default_mirrors = cc_apt_configure.get_default_mirrors(arch)
-        pmir = default_mirrors["PRIMARY"]
-        smir = default_mirrors["SECURITY"]
-        mycloud = get_cloud()
-        cfg = {"primary": [{'arches': ["thisarchdoesntexist_64"],
-                            "uri": "notthis"},
-                           {'arches': ["thisarchdoesntexist"],
-                            "uri": "notthiseither"}],
-               "security": [{'arches': ["thisarchdoesntexist"],
-                             "uri": "nothat"},
-                            {'arches': ["thisarchdoesntexist_64"],
-                             "uri": "nothateither"}]}
-
-        mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch)
-
-        self.assertEqual(mirrors['MIRROR'], pmir)
-        self.assertEqual(mirrors['PRIMARY'], pmir)
-        self.assertEqual(mirrors['SECURITY'], smir)
-
-    def test_apt_v3_mirror_search(self):
-        """test_apt_v3_mirror_search - Test searching mirrors in a list
-        mock checks to avoid relying on network connectivity"""
-        pmir = "http://us.archive.ubuntu.com/ubuntu/"
-        smir = "http://security.ubuntu.com/ubuntu/"
-        cfg = {"primary": [{'arches': ["default"],
-                            "search": ["pfailme", pmir]}],
-               "security": [{'arches': ["default"],
-                             "search": ["sfailme", smir]}]}
-
-        with mock.patch.object(cc_apt_configure.util, 'search_for_mirror',
-                               side_effect=[pmir, smir]) as mocksearch:
-            mirrors = cc_apt_configure.find_apt_mirror_info(cfg, FakeCloud(),
-                                                            'amd64')
-
-        calls = [call(["pfailme", pmir]),
-                 call(["sfailme", smir])]
-        mocksearch.assert_has_calls(calls)
-
-        self.assertEqual(mirrors['MIRROR'],
-                         pmir)
-        self.assertEqual(mirrors['PRIMARY'],
-                         pmir)
-        self.assertEqual(mirrors['SECURITY'],
-                         smir)
-
-    def test_apt_v3_mirror_search_many2(self):
-        """test_apt_v3_mirror_search_many2 - Test both mirror specs at once"""
-        pmir = "http://us.archive.ubuntu.com/ubuntu/"
-        smir = 
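# Hedged sketch (not the real find_apt_mirror_info) of the per-arch
# selection the mirror tests above rely on: prefer the entry whose
# 'arches' contains the running architecture, else fall back to the
# entry listing "default". URIs below are assumed examples.
def _pick_mirror_example(entries, arch):
    for want in (arch, 'default'):
        for entry in entries:
            if want in entry.get('arches', []):
                return entry.get('uri')
    return None

_entries = [
    {'arches': ['default'], 'uri': 'http://us.archive.ubuntu.com/ubuntu/'},
    {'arches': ['ppc64el'], 'uri': 'http://ports.ubuntu.com/ubuntu-ports'}]
assert _pick_mirror_example(_entries, 'ppc64el') == (
    'http://ports.ubuntu.com/ubuntu-ports')
assert _pick_mirror_example(_entries, 'amd64') == (
    'http://us.archive.ubuntu.com/ubuntu/')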
"http://security.ubuntu.com/ubuntu/" - cfg = {"primary": [{'arches': ["default"], - "uri": pmir, - "search": ["pfailme", "foo"]}], - "security": [{'arches': ["default"], - "uri": smir, - "search": ["sfailme", "bar"]}]} - - arch = 'amd64' - - # should be called only once per type, despite two mirror configs - mycloud = None - with mock.patch.object(cc_apt_configure, 'get_mirror', - return_value="http://mocked/foo") as mockgm: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(cfg, 'primary', arch, mycloud), - call(cfg, 'security', arch, mycloud)] - mockgm.assert_has_calls(calls) - - # should not be called, since primary is specified - with mock.patch.object(cc_apt_configure.util, - 'search_for_mirror') as mockse: - mirrors = cc_apt_configure.find_apt_mirror_info( - cfg, FakeCloud(), arch) - mockse.assert_not_called() - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - def test_apt_v3_url_resolvable(self): - """test_apt_v3_url_resolvable - Test resolving urls""" - - with mock.patch.object(util, 'is_resolvable') as mockresolve: - util.is_resolvable_url("http://1.2.3.4/ubuntu") - mockresolve.assert_called_with("1.2.3.4") - - with mock.patch.object(util, 'is_resolvable') as mockresolve: - util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu") - mockresolve.assert_called_with("us.archive.ubuntu.com") - - # former tests can leave this set (or not if the test is ran directly) - # do a hard reset to ensure a stable result - util._DNS_REDIRECT_IP = None - bad = [(None, None, None, "badname", ["10.3.2.1"])] - good = [(None, None, None, "goodname", ["10.2.3.4"])] - with mock.patch.object(socket, 'getaddrinfo', - side_effect=[bad, bad, bad, good, - good]) as mocksock: - ret = util.is_resolvable_url("http://us.archive.ubuntu.com/ubuntu") - ret2 = util.is_resolvable_url("http://1.2.3.4/ubuntu") - mocksock.assert_any_call('does-not-exist.example.com.', None, - 0, 0, 1, 2) - mocksock.assert_any_call('example.invalid.', None, 0, 0, 1, 2) - mocksock.assert_any_call('us.archive.ubuntu.com', None) - mocksock.assert_any_call('1.2.3.4', None) - - self.assertTrue(ret) - self.assertTrue(ret2) - - # side effect need only bad ret after initial call - with mock.patch.object(socket, 'getaddrinfo', - side_effect=[bad]) as mocksock: - ret3 = util.is_resolvable_url("http://failme.com/ubuntu") - calls = [call('failme.com', None)] - mocksock.assert_has_calls(calls) - self.assertFalse(ret3) - - def test_apt_v3_disable_suites(self): - """test_disable_suites - disable_suites with many configurations""" - release = "xenial" - orig = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - - # disable nothing - disabled = [] - expect = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable release suite - disabled = ["$RELEASE"] - expect = """\ -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb 
http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable other suite - disabled = ["$RELEASE-updates"] - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu""" - """ xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # multi disable - disabled = ["$RELEASE-updates", "$RELEASE-security"] - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-updates main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # multi line disable (same suite multiple times in input) - disabled = ["$RELEASE-updates", "$RELEASE-security"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://UBUNTU.com//ubuntu xenial-updates main -deb http://UBUNTU.COM//ubuntu xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-updates main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -# suite disabled by cloud-init: deb http://UBUNTU.com//ubuntu """ - """xenial-updates main -# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """ - """xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # comment in input - disabled = ["$RELEASE-updates", "$RELEASE-security"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -#foo -#deb http://UBUNTU.com//ubuntu xenial-updates main -deb http://UBUNTU.COM//ubuntu xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-updates main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -#foo -#deb http://UBUNTU.com//ubuntu xenial-updates main -# suite disabled by cloud-init: deb http://UBUNTU.COM//ubuntu """ - """xenial-updates main -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable custom suite - disabled = ["foobar"] - orig = """deb 
http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb http://ubuntu.com/ubuntu/ foobar main""" - expect = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -# suite disabled by cloud-init: deb http://ubuntu.com/ubuntu/ foobar main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable non existing suite - disabled = ["foobar"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb http://ubuntu.com/ubuntu/ notfoobar main""" - expect = """deb http://ubuntu.com//ubuntu xenial main -deb http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb http://ubuntu.com/ubuntu/ notfoobar main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable suite with option - disabled = ["$RELEASE-updates"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb [a=b] http://ubu.com//ubu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb [a=b] http://ubu.com//ubu """ - """xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable suite with more options and auto $RELEASE expansion - disabled = ["updates"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb [a=b c=d] http://ubu.com//ubu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = """deb http://ubuntu.com//ubuntu xenial main -# suite disabled by cloud-init: deb [a=b c=d] \ -http://ubu.com//ubu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - # single disable suite while options at others - disabled = ["$RELEASE-security"] - orig = """deb http://ubuntu.com//ubuntu xenial main -deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main -deb http://ubuntu.com//ubuntu xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""" - expect = ("""deb http://ubuntu.com//ubuntu xenial main -deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main -# suite disabled by cloud-init: deb http://ubuntu.com//ubuntu """ - """xenial-security main -deb-src http://ubuntu.com//ubuntu universe multiverse -deb http://ubuntu.com/ubuntu/ xenial-proposed main""") - result = cc_apt_configure.disable_suites(disabled, orig, release) - self.assertEqual(expect, result) - - def test_disable_suites_blank_lines(self): - """test_disable_suites_blank_lines - ensure blank lines allowed""" - 
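# Hedged sketch of the behaviour the disable_suites cases above assert:
# a matching deb/deb-src line is kept but prefixed with a marker
# comment rather than deleted, and '$RELEASE' in a configured suite
# name expands to the detected release first.
def _mark_disabled_example(line):
    return "# suite disabled by cloud-init: " + line

assert _mark_disabled_example(
    "deb http://ubuntu.com//ubuntu xenial-updates main") == (
        "# suite disabled by cloud-init: "
        "deb http://ubuntu.com//ubuntu xenial-updates main")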
lines = ["deb %(repo)s %(rel)s main universe", - "", - "deb %(repo)s %(rel)s-updates main universe", - " # random comment", - "#comment here", - ""] - rel = "trusty" - repo = 'http://example.com/mirrors/ubuntu' - orig = "\n".join(lines) % {'repo': repo, 'rel': rel} - self.assertEqual( - orig, cc_apt_configure.disable_suites(["proposed"], orig, rel)) - - @mock.patch("cloudinit.util.get_hostname", return_value='abc.localdomain') - def test_apt_v3_mirror_search_dns(self, m_get_hostname): - """test_apt_v3_mirror_search_dns - Test searching dns patterns""" - pmir = "phit" - smir = "shit" - arch = 'amd64' - mycloud = get_cloud('ubuntu') - cfg = {"primary": [{'arches': ["default"], - "search_dns": True}], - "security": [{'arches': ["default"], - "search_dns": True}]} - - with mock.patch.object(cc_apt_configure, 'get_mirror', - return_value="http://mocked/foo") as mockgm: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(cfg, 'primary', arch, mycloud), - call(cfg, 'security', arch, mycloud)] - mockgm.assert_has_calls(calls) - - with mock.patch.object(cc_apt_configure, 'search_for_mirror_dns', - return_value="http://mocked/foo") as mocksdns: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - calls = [call(True, 'primary', cfg, mycloud), - call(True, 'security', cfg, mycloud)] - mocksdns.assert_has_calls(calls) - - # first return is for the non-dns call before - with mock.patch.object(cc_apt_configure.util, 'search_for_mirror', - side_effect=[None, pmir, None, smir]) as mockse: - mirrors = cc_apt_configure.find_apt_mirror_info(cfg, mycloud, arch) - - calls = [call(None), - call(['http://ubuntu-mirror.localdomain/ubuntu', - 'http://ubuntu-mirror/ubuntu']), - call(None), - call(['http://ubuntu-security-mirror.localdomain/ubuntu', - 'http://ubuntu-security-mirror/ubuntu'])] - mockse.assert_has_calls(calls) - - self.assertEqual(mirrors['MIRROR'], - pmir) - self.assertEqual(mirrors['PRIMARY'], - pmir) - self.assertEqual(mirrors['SECURITY'], - smir) - - def test_apt_v3_add_mirror_keys(self): - """test_apt_v3_add_mirror_keys - Test adding key for mirrors""" - arch = 'amd64' - cfg = { - 'primary': [ - {'arches': [arch], - 'uri': 'http://test.ubuntu.com/', - 'filename': 'primary', - 'key': 'fakekey_primary'}], - 'security': [ - {'arches': [arch], - 'uri': 'http://testsec.ubuntu.com/', - 'filename': 'security', - 'key': 'fakekey_security'}] - } - - with mock.patch.object(cc_apt_configure, - 'add_apt_key_raw') as mockadd: - cc_apt_configure.add_mirror_keys(cfg, TARGET) - calls = [ - mock.call('fakekey_primary', 'primary', hardened=False), - mock.call('fakekey_security', 'security', hardened=False), - ] - mockadd.assert_has_calls(calls, any_order=True) - - -class TestDebconfSelections(TestCase): - - @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") - def test_set_sel_appends_newline_if_absent(self, m_subp): - """Automatically append a newline to debconf-set-selections config.""" - selections = b'some/setting boolean true' - cc_apt_configure.debconf_set_selections(selections=selections) - cc_apt_configure.debconf_set_selections(selections=selections + b'\n') - m_call = mock.call( - ['debconf-set-selections'], data=selections + b'\n', capture=True, - target=None) - self.assertEqual([m_call, m_call], m_subp.call_args_list) - - @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") - def test_no_set_sel_if_none_to_set(self, m_set_sel): - cc_apt_configure.apply_debconf_selections({'foo': 'bar'}) - m_set_sel.assert_not_called() - - 
@mock.patch("cloudinit.config.cc_apt_configure." - "debconf_set_selections") - @mock.patch("cloudinit.config.cc_apt_configure." - "util.get_installed_packages") - def test_set_sel_call_has_expected_input(self, m_get_inst, m_set_sel): - data = { - 'set1': 'pkga pkga/q1 mybool false', - 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n' - 'pkgc\tpkgc/ip\tstring\t10.0.0.1')} - lines = '\n'.join(data.values()).split('\n') - - m_get_inst.return_value = ["adduser", "apparmor"] - m_set_sel.return_value = None - - cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) - self.assertTrue(m_get_inst.called) - self.assertEqual(m_set_sel.call_count, 1) - - # assumes called with *args value. - selections = m_set_sel.call_args_list[0][0][0].decode() - - missing = [ - line for line in lines if line not in selections.splitlines() - ] - self.assertEqual([], missing) - - @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure") - @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") - @mock.patch("cloudinit.config.cc_apt_configure." - "util.get_installed_packages") - def test_reconfigure_if_intersection(self, m_get_inst, m_set_sel, - m_dpkg_r): - data = { - 'set1': 'pkga pkga/q1 mybool false', - 'set2': ('pkgb\tpkgb/b1\tstr\tthis is a string\n' - 'pkgc\tpkgc/ip\tstring\t10.0.0.1'), - 'cloud-init': ('cloud-init cloud-init/datasources' - 'multiselect MAAS')} - - m_set_sel.return_value = None - m_get_inst.return_value = ["adduser", "apparmor", "pkgb", - "cloud-init", 'zdog'] - - cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) - - # reconfigure should be called with the intersection - # of (packages in config, packages installed) - self.assertEqual(m_dpkg_r.call_count, 1) - # assumes called with *args (dpkg_reconfigure([a,b,c], target=)) - packages = m_dpkg_r.call_args_list[0][0][0] - self.assertEqual(set(['cloud-init', 'pkgb']), set(packages)) - - @mock.patch("cloudinit.config.cc_apt_configure.dpkg_reconfigure") - @mock.patch("cloudinit.config.cc_apt_configure.debconf_set_selections") - @mock.patch("cloudinit.config.cc_apt_configure." - "util.get_installed_packages") - def test_reconfigure_if_no_intersection(self, m_get_inst, m_set_sel, - m_dpkg_r): - data = {'set1': 'pkga pkga/q1 mybool false'} - - m_get_inst.return_value = ["adduser", "apparmor", "pkgb", - "cloud-init", 'zdog'] - m_set_sel.return_value = None - - cc_apt_configure.apply_debconf_selections({'debconf_selections': data}) - - self.assertTrue(m_get_inst.called) - self.assertEqual(m_dpkg_r.call_count, 0) - - @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") - def test_dpkg_reconfigure_does_reconfigure(self, m_subp): - target = "/foo-target" - - # due to the way the cleaners are called (via dictionary reference) - # mocking clean_cloud_init directly does not work. So we mock - # the CONFIG_CLEANERS dictionary and assert our cleaner is called. - ci_cleaner = mock.MagicMock() - with mock.patch.dict(("cloudinit.config.cc_apt_configure." 
- "CONFIG_CLEANERS"), - values={'cloud-init': ci_cleaner}, clear=True): - cc_apt_configure.dpkg_reconfigure(['pkga', 'cloud-init'], - target=target) - # cloud-init is actually the only package we have a cleaner for - # so for now, its the only one that should reconfigured - self.assertTrue(m_subp.called) - ci_cleaner.assert_called_with(target) - self.assertEqual(m_subp.call_count, 1) - found = m_subp.call_args_list[0][0][0] - expected = ['dpkg-reconfigure', '--frontend=noninteractive', - 'cloud-init'] - self.assertEqual(expected, found) - - @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") - def test_dpkg_reconfigure_not_done_on_no_data(self, m_subp): - cc_apt_configure.dpkg_reconfigure([]) - m_subp.assert_not_called() - - @mock.patch("cloudinit.config.cc_apt_configure.subp.subp") - def test_dpkg_reconfigure_not_done_if_no_cleaners(self, m_subp): - cc_apt_configure.dpkg_reconfigure(['pkgfoo', 'pkgbar']) - m_subp.assert_not_called() - -# -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_bootcmd.py b/tests/unittests/test_handler/test_handler_bootcmd.py deleted file mode 100644 index 8cd3a5e1..00000000 --- a/tests/unittests/test_handler/test_handler_bootcmd.py +++ /dev/null @@ -1,152 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -import logging -import tempfile - -from cloudinit.config.cc_bootcmd import handle, schema -from cloudinit import (subp, util) -from cloudinit.tests.helpers import ( - CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema) - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -class FakeExtendedTempFile(object): - def __init__(self, suffix): - self.suffix = suffix - self.handle = tempfile.NamedTemporaryFile( - prefix="ci-%s." % self.__class__.__name__, delete=False) - - def __enter__(self): - return self.handle - - def __exit__(self, exc_type, exc_value, traceback): - self.handle.close() - util.del_file(self.handle.name) - - -class TestBootcmd(CiTestCase): - - with_logs = True - - _etmpfile_path = ('cloudinit.config.cc_bootcmd.temp_utils.' - 'ExtendedTemporaryFile') - - def setUp(self): - super(TestBootcmd, self).setUp() - self.subp = subp.subp - self.new_root = self.tmp_dir() - - def test_handler_skip_if_no_bootcmd(self): - """When the provided config doesn't contain bootcmd, skip it.""" - cfg = {} - mycloud = get_cloud() - handle('notimportant', cfg, mycloud, LOG, None) - self.assertIn( - "Skipping module named notimportant, no 'bootcmd' key", - self.logs.getvalue()) - - def test_handler_invalid_command_set(self): - """Commands which can't be converted to shell will raise errors.""" - invalid_config = {'bootcmd': 1} - cc = get_cloud() - with self.assertRaises(TypeError) as context_manager: - handle('cc_bootcmd', invalid_config, cc, LOG, []) - self.assertIn('Failed to shellify bootcmd', self.logs.getvalue()) - self.assertEqual( - "Input to shellify was type 'int'. Expected list or tuple.", - str(context_manager.exception)) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_type(self): - """Schema validation warns of non-array type for bootcmd key. - - Schema validation is not strict, so bootcmd attempts to shellify the - invalid content. 
- """ - invalid_config = {'bootcmd': 1} - cc = get_cloud() - with self.assertRaises(TypeError): - handle('cc_bootcmd', invalid_config, cc, LOG, []) - self.assertIn( - 'Invalid config:\nbootcmd: 1 is not of type \'array\'', - self.logs.getvalue()) - self.assertIn('Failed to shellify', self.logs.getvalue()) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_item_type(self): - """Schema validation warns of non-array or string bootcmd items. - - Schema validation is not strict, so bootcmd attempts to shellify the - invalid content. - """ - invalid_config = { - 'bootcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} - cc = get_cloud() - with self.assertRaises(TypeError) as context_manager: - handle('cc_bootcmd', invalid_config, cc, LOG, []) - expected_warnings = [ - 'bootcmd.1: 20 is not valid under any of the given schemas', - 'bootcmd.3: {\'a\': \'n\'} is not valid under any of the given' - ' schema' - ] - logs = self.logs.getvalue() - for warning in expected_warnings: - self.assertIn(warning, logs) - self.assertIn('Failed to shellify', logs) - self.assertEqual( - ("Unable to shellify type 'int'. Expected list, string, tuple. " - "Got: 20"), - str(context_manager.exception)) - - def test_handler_creates_and_runs_bootcmd_script_with_instance_id(self): - """Valid schema runs a bootcmd script with INSTANCE_ID in the env.""" - cc = get_cloud() - out_file = self.tmp_path('bootcmd.out', self.new_root) - my_id = "b6ea0f59-e27d-49c6-9f87-79f19765a425" - valid_config = {'bootcmd': [ - 'echo {0} $INSTANCE_ID > {1}'.format(my_id, out_file)]} - - with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - with self.allow_subp(['/bin/sh']): - handle('cc_bootcmd', valid_config, cc, LOG, []) - self.assertEqual(my_id + ' iid-datasource-none\n', - util.load_file(out_file)) - - def test_handler_runs_bootcmd_script_with_error(self): - """When a valid script generates an error, that error is raised.""" - cc = get_cloud() - valid_config = {'bootcmd': ['exit 1']} # Script with error - - with mock.patch(self._etmpfile_path, FakeExtendedTempFile): - with self.allow_subp(['/bin/sh']): - with self.assertRaises(subp.ProcessExecutionError) as ctxt: - handle('does-not-matter', valid_config, cc, LOG, []) - self.assertIn( - 'Unexpected error while running command.\n' - "Command: ['/bin/sh',", - str(ctxt.exception)) - self.assertIn( - 'Failed to run bootcmd module does-not-matter', - self.logs.getvalue()) - - -@skipUnlessJsonSchema() -class TestSchema(CiTestCase, SchemaTestCaseMixin): - """Directly test schema rather than through handle.""" - - schema = schema - - def test_duplicates_are_fine_array_array(self): - """Duplicated commands array/array entries are allowed.""" - self.assertSchemaValid( - ["byebye", "byebye"], 'command entries can be duplicate') - - def test_duplicates_are_fine_array_string(self): - """Duplicated commands array/string entries are allowed.""" - self.assertSchemaValid( - ["echo bye", "echo bye"], "command entries can be duplicate.") - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py deleted file mode 100644 index 2a4ab49e..00000000 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ /dev/null @@ -1,361 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
-import logging -import shutil -import tempfile -import unittest -from contextlib import ExitStack -from unittest import mock - -from cloudinit import distros -from cloudinit.config import cc_ca_certs -from cloudinit import helpers -from cloudinit import subp -from cloudinit import util -from cloudinit.tests.helpers import TestCase - -from tests.unittests.util import get_cloud - - -class TestNoConfig(unittest.TestCase): - def setUp(self): - super(TestNoConfig, self).setUp() - self.name = "ca-certs" - self.cloud_init = None - self.log = logging.getLogger("TestNoConfig") - self.args = [] - - def test_no_config(self): - """ - Test that nothing is done if no ca-certs configuration is provided. - """ - config = util.get_builtin_cfg() - with ExitStack() as mocks: - util_mock = mocks.enter_context( - mock.patch.object(util, 'write_file')) - certs_mock = mocks.enter_context( - mock.patch.object(cc_ca_certs, 'update_ca_certs')) - - cc_ca_certs.handle(self.name, config, self.cloud_init, self.log, - self.args) - - self.assertEqual(util_mock.call_count, 0) - self.assertEqual(certs_mock.call_count, 0) - - -class TestConfig(TestCase): - def setUp(self): - super(TestConfig, self).setUp() - self.name = "ca-certs" - self.paths = None - self.log = logging.getLogger("TestNoConfig") - self.args = [] - - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - - def _mock_init(self): - self.mocks = ExitStack() - self.addCleanup(self.mocks.close) - - # Mock out the functions that actually modify the system - self.mock_add = self.mocks.enter_context( - mock.patch.object(cc_ca_certs, 'add_ca_certs')) - self.mock_update = self.mocks.enter_context( - mock.patch.object(cc_ca_certs, 'update_ca_certs')) - self.mock_remove = self.mocks.enter_context( - mock.patch.object(cc_ca_certs, 'remove_default_ca_certs')) - - def test_no_trusted_list(self): - """ - Test that no certificates are written if the 'trusted' key is not - present. 
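        (Even then update_ca_certs runs once per distro; only the add and
        remove steps are skipped, as the call-count assertions below spell
        out.)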
- """ - config = {"ca-certs": {}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_empty_trusted_list(self): - """Test that no certificate are written if 'trusted' list is empty.""" - config = {"ca-certs": {"trusted": []}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_single_trusted(self): - """Test that a single cert gets passed to add_ca_certs.""" - config = {"ca-certs": {"trusted": ["CERT1"]}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.mock_add.assert_called_once_with(conf, ['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_multiple_trusted(self): - """Test that multiple certs get passed to add_ca_certs.""" - config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.mock_add.assert_called_once_with(conf, ['CERT1', 'CERT2']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_remove_default_ca_certs(self): - """Test remove_defaults works as expected.""" - config = {"ca-certs": {"remove-defaults": True}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) - - def test_no_remove_defaults_if_false(self): - """Test remove_defaults is not called when config value is False.""" - config = {"ca-certs": {"remove-defaults": False}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.assertEqual(self.mock_add.call_count, 0) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 0) - - def test_correct_order_for_remove_then_add(self): - """Test remove_defaults is not called when config value is False.""" - config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} - - for distro_name in cc_ca_certs.distros: - self._mock_init() - cloud = get_cloud(distro_name) - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - cc_ca_certs.handle(self.name, config, cloud, self.log, self.args) - - self.mock_add.assert_called_once_with(conf, ['CERT1']) - self.assertEqual(self.mock_update.call_count, 1) - self.assertEqual(self.mock_remove.call_count, 1) - - -class TestAddCaCerts(TestCase): - - def setUp(self): - super(TestAddCaCerts, self).setUp() - tmpdir = 
tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, tmpdir) - self.paths = helpers.Paths({ - 'cloud_dir': tmpdir, - }) - self.add_patch("cloudinit.config.cc_ca_certs.os.stat", "m_stat") - - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - - def test_no_certs_in_list(self): - """Test that no certificate are written if not provided.""" - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - with mock.patch.object(util, 'write_file') as mockobj: - cc_ca_certs.add_ca_certs(conf, []) - self.assertEqual(mockobj.call_count, 0) - - def test_single_cert_trailing_cr(self): - """Test adding a single certificate to the trusted CAs - when existing ca-certificates has trailing newline""" - cert = "CERT1\nLINE2\nLINE3" - - ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n" - expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n" - - self.m_stat.return_value.st_size = 1 - - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) - - cc_ca_certs.add_ca_certs(conf, [cert]) - - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - cert, mode=0o644)]) - if conf['ca_cert_config'] is not None: - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - expected, omode="wb")]) - mock_load.assert_called_once_with(conf['ca_cert_config']) - - def test_single_cert_no_trailing_cr(self): - """Test adding a single certificate to the trusted CAs - when existing ca-certificates has no trailing newline""" - cert = "CERT1\nLINE2\nLINE3" - - ca_certs_content = "line1\nline2\nline3" - - self.m_stat.return_value.st_size = 1 - - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) - - cc_ca_certs.add_ca_certs(conf, [cert]) - - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - cert, mode=0o644)]) - if conf['ca_cert_config'] is not None: - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - "%s\n%s\n" % (ca_certs_content, - conf['ca_cert_filename']), - omode="wb")]) - - mock_load.assert_called_once_with(conf['ca_cert_config']) - - def test_single_cert_to_empty_existing_ca_file(self): - """Test adding a single certificate to the trusted CAs - when existing ca-certificates.conf is empty""" - cert = "CERT1\nLINE2\nLINE3" - - expected = "cloud-init-ca-certs.crt\n" - - self.m_stat.return_value.st_size = 0 - - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - with mock.patch.object(util, 'write_file', - autospec=True) as m_write: - - cc_ca_certs.add_ca_certs(conf, [cert]) - - m_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - cert, mode=0o644)]) - if conf['ca_cert_config'] is not None: - m_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - expected, omode="wb")]) - - def test_multiple_certs(self): - """Test adding multiple certificates to the trusted CAs.""" - certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"] - expected_cert_file = 
"\n".join(certs) - ca_certs_content = "line1\nline2\nline3" - - self.m_stat.return_value.st_size = 1 - - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - - with ExitStack() as mocks: - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_load = mocks.enter_context( - mock.patch.object(util, 'load_file', - return_value=ca_certs_content)) - - cc_ca_certs.add_ca_certs(conf, certs) - - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_full_path'], - expected_cert_file, mode=0o644)]) - if conf['ca_cert_config'] is not None: - mock_write.assert_has_calls([ - mock.call(conf['ca_cert_config'], - "%s\n%s\n" % (ca_certs_content, - conf['ca_cert_filename']), - omode='wb')]) - - mock_load.assert_called_once_with(conf['ca_cert_config']) - - -class TestUpdateCaCerts(unittest.TestCase): - def test_commands(self): - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - with mock.patch.object(subp, 'subp') as mockobj: - cc_ca_certs.update_ca_certs(conf) - mockobj.assert_called_once_with( - conf['ca_cert_update_cmd'], capture=False) - - -class TestRemoveDefaultCaCerts(TestCase): - - def setUp(self): - super(TestRemoveDefaultCaCerts, self).setUp() - tmpdir = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, tmpdir) - self.paths = helpers.Paths({ - 'cloud_dir': tmpdir, - }) - - def test_commands(self): - for distro_name in cc_ca_certs.distros: - conf = cc_ca_certs._distro_ca_certs_configs(distro_name) - - with ExitStack() as mocks: - mock_delete = mocks.enter_context( - mock.patch.object(util, 'delete_dir_contents')) - mock_write = mocks.enter_context( - mock.patch.object(util, 'write_file')) - mock_subp = mocks.enter_context( - mock.patch.object(subp, 'subp')) - - cc_ca_certs.remove_default_ca_certs(distro_name, conf) - - mock_delete.assert_has_calls([ - mock.call(conf['ca_cert_path']), - mock.call(conf['ca_cert_system_path'])]) - - if conf['ca_cert_config'] is not None: - mock_write.assert_called_once_with( - conf['ca_cert_config'], "", mode=0o644) - - if distro_name in ['debian', 'ubuntu']: - mock_subp.assert_called_once_with( - ('debconf-set-selections', '-'), - "ca-certificates \ -ca-certificates/trust_new_crts select no") - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py deleted file mode 100644 index 0672cebc..00000000 --- a/tests/unittests/test_handler/test_handler_chef.py +++ /dev/null @@ -1,271 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import httpretty -import json -import logging -import os - -from cloudinit.config import cc_chef -from cloudinit import util - -from cloudinit.tests.helpers import ( - HttprettyTestCase, FilesystemMockingTestCase, mock, skipIf) - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - -CLIENT_TEMPL = os.path.sep.join(["templates", "chef_client.rb.tmpl"]) - -# This is adjusted to use http because using with https causes issue -# in some openssl/httpretty combinations. 
-# https://github.com/gabrielfalcao/HTTPretty/issues/242 -# We saw issue in opensuse 42.3 with -# httpretty=0.8.8-7.1 ndg-httpsclient=0.4.0-3.2 pyOpenSSL=16.0.0-4.1 -OMNIBUS_URL_HTTP = cc_chef.OMNIBUS_URL.replace("https:", "http:") - - -class TestInstallChefOmnibus(HttprettyTestCase): - - def setUp(self): - super(TestInstallChefOmnibus, self).setUp() - self.new_root = self.tmp_dir() - - @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) - def test_install_chef_from_omnibus_runs_chef_url_content(self): - """install_chef_from_omnibus calls subp_blob_in_tempfile.""" - response = b'#!/bin/bash\necho "Hi Mom"' - httpretty.register_uri( - httpretty.GET, cc_chef.OMNIBUS_URL, body=response, status=200) - ret = (None, None) # stdout, stderr but capture=False - - with mock.patch("cloudinit.config.cc_chef.subp_blob_in_tempfile", - return_value=ret) as m_subp_blob: - cc_chef.install_chef_from_omnibus() - # admittedly whitebox, but assuming subp_blob_in_tempfile works - # this should be fine. - self.assertEqual( - [mock.call(blob=response, args=[], basename='chef-omnibus-install', - capture=False)], - m_subp_blob.call_args_list) - - @mock.patch('cloudinit.config.cc_chef.url_helper.readurl') - @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile') - def test_install_chef_from_omnibus_retries_url(self, m_subp_blob, m_rdurl): - """install_chef_from_omnibus retries OMNIBUS_URL upon failure.""" - - class FakeURLResponse(object): - contents = '#!/bin/bash\necho "Hi Mom" > {0}/chef.out'.format( - self.new_root) - - m_rdurl.return_value = FakeURLResponse() - - cc_chef.install_chef_from_omnibus() - expected_kwargs = {'retries': cc_chef.OMNIBUS_URL_RETRIES, - 'url': cc_chef.OMNIBUS_URL} - self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[0][1]) - cc_chef.install_chef_from_omnibus(retries=10) - expected_kwargs = {'retries': 10, - 'url': cc_chef.OMNIBUS_URL} - self.assertCountEqual(expected_kwargs, m_rdurl.call_args_list[1][1]) - expected_subp_kwargs = { - 'args': ['-v', '2.0'], - 'basename': 'chef-omnibus-install', - 'blob': m_rdurl.return_value.contents, - 'capture': False - } - self.assertCountEqual( - expected_subp_kwargs, - m_subp_blob.call_args_list[0][1]) - - @mock.patch("cloudinit.config.cc_chef.OMNIBUS_URL", OMNIBUS_URL_HTTP) - @mock.patch('cloudinit.config.cc_chef.subp_blob_in_tempfile') - def test_install_chef_from_omnibus_has_omnibus_version(self, m_subp_blob): - """install_chef_from_omnibus provides version arg to OMNIBUS_URL.""" - chef_outfile = self.tmp_path('chef.out', self.new_root) - response = '#!/bin/bash\necho "Hi Mom" > {0}'.format(chef_outfile) - httpretty.register_uri( - httpretty.GET, cc_chef.OMNIBUS_URL, body=response) - cc_chef.install_chef_from_omnibus(omnibus_version='2.0') - - called_kwargs = m_subp_blob.call_args_list[0][1] - expected_kwargs = { - 'args': ['-v', '2.0'], - 'basename': 'chef-omnibus-install', - 'blob': response, - 'capture': False - } - self.assertCountEqual(expected_kwargs, called_kwargs) - - -class TestChef(FilesystemMockingTestCase): - - def setUp(self): - super(TestChef, self).setUp() - self.tmp = self.tmp_dir() - - def test_no_config(self): - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - cfg = {} - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - for d in cc_chef.CHEF_DIRS: - self.assertFalse(os.path.isdir(d)) - - @skipIf(not os.path.isfile(CLIENT_TEMPL), - CLIENT_TEMPL + " is not available") - def test_basic_config(self): - """ - test basic config looks sane - - # This should create a file of the format... 
- # Created by cloud-init v. 0.7.6 on Sat, 11 Oct 2014 23:57:21 +0000 - chef_license "accept" - log_level :info - ssl_verify_mode :verify_none - log_location "/var/log/chef/client.log" - validation_client_name "bob" - validation_key "/etc/chef/validation.pem" - client_key "/etc/chef/client.pem" - chef_server_url "localhost" - environment "_default" - node_name "iid-datasource-none" - json_attribs "/etc/chef/firstboot.json" - file_cache_path "/var/cache/chef" - file_backup_path "/var/backups/chef" - pid_file "/var/run/chef/client.pid" - Chef::Log::Formatter.show_time = true - encrypted_data_bag_secret "/etc/chef/encrypted_data_bag_secret" - """ - tpl_file = util.load_file('templates/chef_client.rb.tmpl') - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) - cfg = { - 'chef': { - 'chef_license': "accept", - 'server_url': 'localhost', - 'validation_name': 'bob', - 'validation_key': "/etc/chef/vkey.pem", - 'validation_cert': "this is my cert", - 'encrypted_data_bag_secret': - '/etc/chef/encrypted_data_bag_secret' - }, - } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - for d in cc_chef.CHEF_DIRS: - self.assertTrue(os.path.isdir(d)) - c = util.load_file(cc_chef.CHEF_RB_PATH) - - # the content of these keys is not expected to be rendered to tmpl - unrendered_keys = ('validation_cert',) - for k, v in cfg['chef'].items(): - if k in unrendered_keys: - continue - self.assertIn(v, c) - for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items(): - if k in unrendered_keys: - continue - # the value from the cfg overrides that in the default - val = cfg['chef'].get(k, v) - if isinstance(val, str): - self.assertIn(val, c) - c = util.load_file(cc_chef.CHEF_FB_PATH) - self.assertEqual({}, json.loads(c)) - - def test_firstboot_json(self): - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'run_list': ['a', 'b', 'c'], - 'initial_attributes': { - 'c': 'd', - } - }, - } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - c = util.load_file(cc_chef.CHEF_FB_PATH) - self.assertEqual( - { - 'run_list': ['a', 'b', 'c'], - 'c': 'd', - }, json.loads(c)) - - @skipIf(not os.path.isfile(CLIENT_TEMPL), - CLIENT_TEMPL + " is not available") - def test_template_deletes(self): - tpl_file = util.load_file('templates/chef_client.rb.tmpl') - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) - cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'json_attribs': None, - 'show_time': None, - }, - } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - c = util.load_file(cc_chef.CHEF_RB_PATH) - self.assertNotIn('json_attribs', c) - self.assertNotIn('Formatter.show_time', c) - - @skipIf(not os.path.isfile(CLIENT_TEMPL), - CLIENT_TEMPL + " is not available") - def test_validation_cert_and_validation_key(self): - # test validation_cert content is written to validation_key path - tpl_file = util.load_file('templates/chef_client.rb.tmpl') - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) - v_path = '/etc/chef/vkey.pem' - v_cert = 'this is my cert' - cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'validation_key': v_path, - 'validation_cert': v_cert - }, - } - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - content = util.load_file(cc_chef.CHEF_RB_PATH) - 
self.assertIn(v_path, content) - util.load_file(v_path) - self.assertEqual(v_cert, util.load_file(v_path)) - - def test_validation_cert_with_system(self): - # test validation_cert content is not written over system file - tpl_file = util.load_file('templates/chef_client.rb.tmpl') - self.patchUtils(self.tmp) - self.patchOS(self.tmp) - - v_path = '/etc/chef/vkey.pem' - v_cert = "system" - expected_cert = "this is the system file certificate" - cfg = { - 'chef': { - 'server_url': 'localhost', - 'validation_name': 'bob', - 'validation_key': v_path, - 'validation_cert': v_cert - }, - } - util.write_file('/etc/cloud/templates/chef_client.rb.tmpl', tpl_file) - util.write_file(v_path, expected_cert) - cc_chef.handle('chef', cfg, get_cloud(), LOG, []) - content = util.load_file(cc_chef.CHEF_RB_PATH) - self.assertIn(v_path, content) - util.load_file(v_path) - self.assertEqual(expected_cert, util.load_file(v_path)) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_debug.py b/tests/unittests/test_handler/test_handler_debug.py deleted file mode 100644 index 41e9d9bd..00000000 --- a/tests/unittests/test_handler/test_handler_debug.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2014 Yahoo! Inc. -# -# This file is part of cloud-init. See LICENSE file for license information. -import logging -import shutil -import tempfile - -from cloudinit import util -from cloudinit.config import cc_debug -from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock) - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -@mock.patch('cloudinit.distros.debian.read_system_locale') -class TestDebug(FilesystemMockingTestCase): - def setUp(self): - super(TestDebug, self).setUp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - self.patchUtils(self.new_root) - - def test_debug_write(self, m_locale): - m_locale.return_value = 'en_US.UTF-8' - cfg = { - 'abc': '123', - 'c': '\u20a0', - 'debug': { - 'verbose': True, - # Does not actually write here due to mocking... - 'output': '/var/log/cloud-init-debug.log', - }, - } - cc = get_cloud() - cc_debug.handle('cc_debug', cfg, cc, LOG, []) - contents = util.load_file('/var/log/cloud-init-debug.log') - # Some basic sanity tests... - self.assertNotEqual(0, len(contents)) - for k in cfg.keys(): - self.assertIn(k, contents) - - def test_debug_no_write(self, m_locale): - m_locale.return_value = 'en_US.UTF-8' - cfg = { - 'abc': '123', - 'debug': { - 'verbose': False, - # Does not actually write here due to mocking... - 'output': '/var/log/cloud-init-debug.log', - }, - } - cc = get_cloud() - cc_debug.handle('cc_debug', cfg, cc, LOG, []) - self.assertRaises(IOError, - util.load_file, '/var/log/cloud-init-debug.log') - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_disk_setup.py b/tests/unittests/test_handler/test_handler_disk_setup.py deleted file mode 100644 index 4f4a57fa..00000000 --- a/tests/unittests/test_handler/test_handler_disk_setup.py +++ /dev/null @@ -1,243 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
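# For orientation: the layout strings asserted below are sfdisk-style, one
# ',<size>,<type>' entry per partition, with type 83 (Linux) as the default
# and an empty size field on the last entry so it takes the remaining
# space. A [50, 50] split of a disk is thus expected to render roughly as:
#
#   ,<half of the disk>,83
#   ,,83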
- -import random - -from cloudinit.config import cc_disk_setup -from cloudinit.tests.helpers import CiTestCase, ExitStack, mock, TestCase - - -class TestIsDiskUsed(TestCase): - - def setUp(self): - super(TestIsDiskUsed, self).setUp() - self.patches = ExitStack() - mod_name = 'cloudinit.config.cc_disk_setup' - self.enumerate_disk = self.patches.enter_context( - mock.patch('{0}.enumerate_disk'.format(mod_name))) - self.check_fs = self.patches.enter_context( - mock.patch('{0}.check_fs'.format(mod_name))) - - def tearDown(self): - super(TestIsDiskUsed, self).tearDown() - self.patches.close() - - def test_multiple_child_nodes_returns_true(self): - self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(2)) - self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) - self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock())) - - def test_valid_filesystem_returns_true(self): - self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) - self.check_fs.return_value = ( - mock.MagicMock(), 'ext4', mock.MagicMock()) - self.assertTrue(cc_disk_setup.is_disk_used(mock.MagicMock())) - - def test_one_child_nodes_and_no_fs_returns_false(self): - self.enumerate_disk.return_value = (mock.MagicMock() for _ in range(1)) - self.check_fs.return_value = (mock.MagicMock(), None, mock.MagicMock()) - self.assertFalse(cc_disk_setup.is_disk_used(mock.MagicMock())) - - -class TestGetMbrHddSize(TestCase): - - def setUp(self): - super(TestGetMbrHddSize, self).setUp() - self.patches = ExitStack() - self.subp = self.patches.enter_context( - mock.patch.object(cc_disk_setup.subp, 'subp')) - - def tearDown(self): - super(TestGetMbrHddSize, self).tearDown() - self.patches.close() - - def _configure_subp_mock(self, hdd_size_in_bytes, sector_size_in_bytes): - def _subp(cmd, *args, **kwargs): - self.assertEqual(3, len(cmd)) - if '--getsize64' in cmd: - return hdd_size_in_bytes, None - elif '--getss' in cmd: - return sector_size_in_bytes, None - raise Exception('Unexpected blockdev command called') - - self.subp.side_effect = _subp - - def _test_for_sector_size(self, sector_size): - size_in_bytes = random.randint(10000, 10000000) * 512 - size_in_sectors = size_in_bytes / sector_size - self._configure_subp_mock(size_in_bytes, sector_size) - self.assertEqual(size_in_sectors, - cc_disk_setup.get_hdd_size('/dev/sda1')) - - def test_size_for_512_byte_sectors(self): - self._test_for_sector_size(512) - - def test_size_for_1024_byte_sectors(self): - self._test_for_sector_size(1024) - - def test_size_for_2048_byte_sectors(self): - self._test_for_sector_size(2048) - - def test_size_for_4096_byte_sectors(self): - self._test_for_sector_size(4096) - - -class TestGetPartitionMbrLayout(TestCase): - - def test_single_partition_using_boolean(self): - self.assertEqual('0,', - cc_disk_setup.get_partition_mbr_layout(1000, True)) - - def test_single_partition_using_list(self): - disk_size = random.randint(1000000, 1000000000000) - self.assertEqual( - ',,83', - cc_disk_setup.get_partition_mbr_layout(disk_size, [100])) - - def test_half_and_half(self): - disk_size = random.randint(1000000, 1000000000000) - expected_partition_size = int(float(disk_size) / 2) - self.assertEqual( - ',{0},83\n,,83'.format(expected_partition_size), - cc_disk_setup.get_partition_mbr_layout(disk_size, [50, 50])) - - def test_thirds_with_different_partition_type(self): - disk_size = random.randint(1000000, 1000000000000) - expected_partition_size = int(float(disk_size) * 0.33) - self.assertEqual( - 
',{0},83\n,,82'.format(expected_partition_size), - cc_disk_setup.get_partition_mbr_layout(disk_size, [33, [66, 82]])) - - -class TestUpdateFsSetupDevices(TestCase): - def test_regression_1634678(self): - # Cf. https://bugs.launchpad.net/cloud-init/+bug/1634678 - fs_setup = { - 'partition': 'auto', - 'device': '/dev/xvdb1', - 'overwrite': False, - 'label': 'test', - 'filesystem': 'ext4' - } - - cc_disk_setup.update_fs_setup_devices([fs_setup], - lambda device: device) - - self.assertEqual({ - '_origname': '/dev/xvdb1', - 'partition': 'auto', - 'device': '/dev/xvdb1', - 'overwrite': False, - 'label': 'test', - 'filesystem': 'ext4' - }, fs_setup) - - def test_dotted_devname(self): - fs_setup = { - 'partition': 'auto', - 'device': 'ephemeral0.0', - 'label': 'test2', - 'filesystem': 'xfs' - } - - cc_disk_setup.update_fs_setup_devices([fs_setup], - lambda device: device) - - self.assertEqual({ - '_origname': 'ephemeral0.0', - '_partition': 'auto', - 'partition': '0', - 'device': 'ephemeral0', - 'label': 'test2', - 'filesystem': 'xfs' - }, fs_setup) - - def test_dotted_devname_populates_partition(self): - fs_setup = { - 'device': 'ephemeral0.1', - 'label': 'test2', - 'filesystem': 'xfs' - } - cc_disk_setup.update_fs_setup_devices([fs_setup], - lambda device: device) - self.assertEqual({ - '_origname': 'ephemeral0.1', - 'device': 'ephemeral0', - 'partition': '1', - 'label': 'test2', - 'filesystem': 'xfs' - }, fs_setup) - - -@mock.patch('cloudinit.config.cc_disk_setup.assert_and_settle_device', - return_value=None) -@mock.patch('cloudinit.config.cc_disk_setup.find_device_node', - return_value=('/dev/xdb1', False)) -@mock.patch('cloudinit.config.cc_disk_setup.device_type', return_value=None) -@mock.patch('cloudinit.config.cc_disk_setup.subp.subp', return_value=('', '')) -class TestMkfsCommandHandling(CiTestCase): - - with_logs = True - - def test_with_cmd(self, subp, *args): - """mkfs honors cmd and logs warnings when extra_opts or overwrite are - provided.""" - cc_disk_setup.mkfs({ - 'cmd': 'mkfs -t %(filesystem)s -L %(label)s %(device)s', - 'filesystem': 'ext4', - 'device': '/dev/xdb1', - 'label': 'with_cmd', - 'extra_opts': ['should', 'generate', 'warning'], - 'overwrite': 'should generate warning too' - }) - - self.assertIn( - 'extra_opts ' + - 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + - '/dev/xdb1', - self.logs.getvalue()) - self.assertIn( - 'overwrite ' + - 'ignored because cmd was specified: mkfs -t ext4 -L with_cmd ' + - '/dev/xdb1', - self.logs.getvalue()) - - subp.assert_called_once_with( - 'mkfs -t ext4 -L with_cmd /dev/xdb1', shell=True) - - @mock.patch('cloudinit.config.cc_disk_setup.subp.which') - def test_overwrite_and_extra_opts_without_cmd(self, m_which, subp, *args): - """mkfs observes extra_opts and overwrite settings when cmd is not - present.""" - m_which.side_effect = lambda p: {'mkfs.ext4': '/sbin/mkfs.ext4'}[p] - cc_disk_setup.mkfs({ - 'filesystem': 'ext4', - 'device': '/dev/xdb1', - 'label': 'without_cmd', - 'extra_opts': ['are', 'added'], - 'overwrite': True - }) - - subp.assert_called_once_with( - ['/sbin/mkfs.ext4', '/dev/xdb1', - '-L', 'without_cmd', '-F', 'are', 'added'], - shell=False) - - @mock.patch('cloudinit.config.cc_disk_setup.subp.which') - def test_mkswap(self, m_which, subp, *args): - """mkfs observes extra_opts and overwrite settings when cmd is not - present.""" - m_which.side_effect = iter([None, '/sbin/mkswap']) - cc_disk_setup.mkfs({ - 'filesystem': 'swap', - 'device': '/dev/xdb1', - 'label': 'swap', - 'overwrite': True, - }) - - 
self.assertEqual([mock.call('mkfs.swap'), mock.call('mkswap')], - m_which.call_args_list) - subp.assert_called_once_with( - ['/sbin/mkswap', '/dev/xdb1', '-L', 'swap', '-f'], shell=False) - -# -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_etc_hosts.py b/tests/unittests/test_handler/test_handler_etc_hosts.py deleted file mode 100644 index e3778b11..00000000 --- a/tests/unittests/test_handler/test_handler_etc_hosts.py +++ /dev/null @@ -1,70 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.config import cc_update_etc_hosts - -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers -from cloudinit import util - -from cloudinit.tests import helpers as t_help - -import logging -import os -import shutil - -LOG = logging.getLogger(__name__) - - -class TestHostsFile(t_help.FilesystemMockingTestCase): - def setUp(self): - super(TestHostsFile, self).setUp() - self.tmp = self.tmp_dir() - - def _fetch_distro(self, kind): - cls = distros.fetch(kind) - paths = helpers.Paths({}) - return cls(kind, {}, paths) - - def test_write_etc_hosts_suse_localhost(self): - cfg = { - 'manage_etc_hosts': 'localhost', - 'hostname': 'cloud-init.test.us' - } - os.makedirs('%s/etc/' % self.tmp) - hosts_content = '192.168.1.1 blah.blah.us blah\n' - fout = open('%s/etc/hosts' % self.tmp, 'w') - fout.write(hosts_content) - fout.close() - distro = self._fetch_distro('sles') - distro.hosts_fn = '%s/etc/hosts' % self.tmp - paths = helpers.Paths({}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_update_etc_hosts.handle('test', cfg, cc, LOG, []) - contents = util.load_file('%s/etc/hosts' % self.tmp) - if '127.0.1.1\tcloud-init.test.us\tcloud-init' not in contents: - self.assertIsNone('No entry for 127.0.1.1 in etc/hosts') - if '192.168.1.1\tblah.blah.us\tblah' not in contents: - self.assertIsNone('Default etc/hosts content modified') - - @t_help.skipUnlessJinja() - def test_write_etc_hosts_suse_template(self): - cfg = { - 'manage_etc_hosts': 'template', - 'hostname': 'cloud-init.test.us' - } - shutil.copytree('templates', '%s/etc/cloud/templates' % self.tmp) - distro = self._fetch_distro('sles') - paths = helpers.Paths({}) - paths.template_tpl = '%s' % self.tmp + '/etc/cloud/templates/%s.tmpl' - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_update_etc_hosts.handle('test', cfg, cc, LOG, []) - contents = util.load_file('%s/etc/hosts' % self.tmp) - if '127.0.1.1 cloud-init.test.us cloud-init' not in contents: - self.assertIsNone('No entry for 127.0.1.1 in etc/hosts') - if '::1 cloud-init.test.us cloud-init' not in contents: - self.assertIsNone('No entry for 127.0.0.1 in etc/hosts') diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py deleted file mode 100644 index b7d5d7ba..00000000 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ /dev/null @@ -1,309 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
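# A minimal sketch of the cloud-config these tests drive, mirroring the
# mode/devices summary comment kept a few lines down:
#
#   growpart:
#     mode: auto
#     devices: ['/']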
- -from cloudinit import cloud -from cloudinit.config import cc_growpart -from cloudinit import subp -from cloudinit import temp_utils - -from cloudinit.tests.helpers import TestCase - -import errno -import logging -import os -import shutil -import re -import unittest -from contextlib import ExitStack -from unittest import mock -import stat - -# growpart: -# mode: auto # off, on, auto, 'growpart' -# devices: ['root'] - -HELP_GROWPART_RESIZE = """ -growpart disk partition - rewrite partition table so that partition takes up all the space it can - options: - -h | --help print Usage and exit - - -u | --update R update the the kernel partition table info after growing - this requires kernel support and 'partx --update' - R is one of: - - 'auto' : [default] update partition if possible - - Example: - - growpart /dev/sda 1 - Resize partition 1 on /dev/sda -""" - -HELP_GROWPART_NO_RESIZE = """ -growpart disk partition - rewrite partition table so that partition takes up all the space it can - options: - -h | --help print Usage and exit - - Example: - - growpart /dev/sda 1 - Resize partition 1 on /dev/sda -""" - -HELP_GPART = """ -usage: gpart add -t type [-a alignment] [-b start] geom - gpart backup geom - gpart bootcode [-b bootcode] [-p partcode -i index] [-f flags] geom - - gpart resize -i index [-a alignment] [-s size] [-f flags] geom - gpart restore [-lF] [-f flags] provider [...] - gpart recover [-f flags] geom - gpart help - -""" - - -class Dir: - '''Stub object''' - def __init__(self, name): - self.name = name - self.st_mode = name - - def is_dir(self, *args, **kwargs): - return True - - def stat(self, *args, **kwargs): - return self - - -class Scanner: - '''Stub object''' - def __enter__(self): - return (Dir(''), Dir(''),) - - def __exit__(self, *args): - pass - - -class TestDisabled(unittest.TestCase): - def setUp(self): - super(TestDisabled, self).setUp() - self.name = "growpart" - self.cloud_init = None - self.log = logging.getLogger("TestDisabled") - self.args = [] - - self.handle = cc_growpart.handle - - def test_mode_off(self): - # Test that nothing is done if mode is off. 
- - # this really only verifies that resizer_factory isn't called - config = {'growpart': {'mode': 'off'}} - - with mock.patch.object(cc_growpart, 'resizer_factory') as mockobj: - self.handle(self.name, config, self.cloud_init, self.log, - self.args) - self.assertEqual(mockobj.call_count, 0) - - -class TestConfig(TestCase): - def setUp(self): - super(TestConfig, self).setUp() - self.name = "growpart" - self.paths = None - self.cloud = cloud.Cloud(None, self.paths, None, None, None) - self.log = logging.getLogger("TestConfig") - self.args = [] - - self.cloud_init = None - self.handle = cc_growpart.handle - self.tmppath = '/tmp/cloudinit-test-file' - self.tmpdir = os.scandir('/tmp') - self.tmpfile = open(self.tmppath, 'w') - - def tearDown(self): - self.tmpfile.close() - os.remove(self.tmppath) - - @mock.patch.dict("os.environ", clear=True) - def test_no_resizers_auto_is_fine(self): - with mock.patch.object( - subp, 'subp', - return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj: - - config = {'growpart': {'mode': 'auto'}} - self.handle(self.name, config, self.cloud_init, self.log, - self.args) - - mockobj.assert_has_calls([ - mock.call(['growpart', '--help'], env={'LANG': 'C'}), - mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) - - @mock.patch.dict("os.environ", clear=True) - def test_no_resizers_mode_growpart_is_exception(self): - with mock.patch.object( - subp, 'subp', - return_value=(HELP_GROWPART_NO_RESIZE, "")) as mockobj: - config = {'growpart': {'mode': "growpart"}} - self.assertRaises( - ValueError, self.handle, self.name, config, - self.cloud_init, self.log, self.args) - - mockobj.assert_called_once_with( - ['growpart', '--help'], env={'LANG': 'C'}) - - @mock.patch.dict("os.environ", clear=True) - def test_mode_auto_prefers_growpart(self): - with mock.patch.object( - subp, 'subp', - return_value=(HELP_GROWPART_RESIZE, "")) as mockobj: - ret = cc_growpart.resizer_factory(mode="auto") - self.assertIsInstance(ret, cc_growpart.ResizeGrowPart) - - mockobj.assert_called_once_with( - ['growpart', '--help'], env={'LANG': 'C'}) - - @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) - @mock.patch.object(temp_utils, 'mkdtemp', return_value='/tmp/much-random') - @mock.patch.object(stat, 'S_ISDIR', return_value=False) - @mock.patch.object(os.path, 'samestat', return_value=True) - @mock.patch.object(os.path, "join", return_value='/tmp') - @mock.patch.object(os, 'scandir', return_value=Scanner()) - @mock.patch.object(os, 'mkdir') - @mock.patch.object(os, 'unlink') - @mock.patch.object(os, 'rmdir') - @mock.patch.object(os, 'open', return_value=1) - @mock.patch.object(os, 'close') - @mock.patch.object(shutil, 'rmtree') - @mock.patch.object(os, 'lseek', return_value=1024) - @mock.patch.object(os, 'lstat', return_value='interesting metadata') - def test_force_lang_check_tempfile(self, *args, **kwargs): - with mock.patch.object( - subp, - 'subp', - return_value=(HELP_GROWPART_RESIZE, "")) as mockobj: - - ret = cc_growpart.resizer_factory(mode="auto") - self.assertIsInstance(ret, cc_growpart.ResizeGrowPart) - diskdev = '/dev/sdb' - partnum = 1 - partdev = '/dev/sdb' - ret.resize(diskdev, partnum, partdev) - mockobj.assert_has_calls([ - mock.call( - ["growpart", '--dry-run', diskdev, partnum], - env={'LANG': 'C', 'TMPDIR': '/tmp'}), - mock.call( - ["growpart", diskdev, partnum], - env={'LANG': 'C', 'TMPDIR': '/tmp'}), - ]) - - @mock.patch.dict("os.environ", {'LANG': 'cs_CZ.UTF-8'}, clear=True) - def test_mode_auto_falls_back_to_gpart(self): - with mock.patch.object( - 
subp, 'subp', - return_value=("", HELP_GPART)) as mockobj: - ret = cc_growpart.resizer_factory(mode="auto") - self.assertIsInstance(ret, cc_growpart.ResizeGpart) - - mockobj.assert_has_calls([ - mock.call(['growpart', '--help'], env={'LANG': 'C'}), - mock.call(['gpart', 'help'], env={'LANG': 'C'}, rcs=[0, 1])]) - - def test_handle_with_no_growpart_entry(self): - # if no 'growpart' entry in config, then mode=auto should be used - - myresizer = object() - retval = (("/", cc_growpart.RESIZE.CHANGED, "my-message",),) - - with ExitStack() as mocks: - factory = mocks.enter_context( - mock.patch.object(cc_growpart, 'resizer_factory', - return_value=myresizer)) - rsdevs = mocks.enter_context( - mock.patch.object(cc_growpart, 'resize_devices', - return_value=retval)) - mocks.enter_context( - mock.patch.object(cc_growpart, 'RESIZERS', - (('mysizer', object),) - )) - - self.handle(self.name, {}, self.cloud_init, self.log, self.args) - - factory.assert_called_once_with('auto') - rsdevs.assert_called_once_with(myresizer, ['/']) - - -class TestResize(unittest.TestCase): - def setUp(self): - super(TestResize, self).setUp() - self.name = "growpart" - self.log = logging.getLogger("TestResize") - - def test_simple_devices(self): - # test simple device list - # this patches out devent2dev, os.stat, and device_part_info - # so in the end, doesn't test a lot - devs = ["/dev/XXda1", "/dev/YYda2"] - devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5, - st_nlink=1, st_uid=0, st_gid=6, st_size=0, - st_atime=0, st_mtime=0, st_ctime=0) - enoent = ["/dev/NOENT"] - real_stat = os.stat - resize_calls = [] - - class myresizer(object): - def resize(self, diskdev, partnum, partdev): - resize_calls.append((diskdev, partnum, partdev)) - if partdev == "/dev/YYda2": - return (1024, 2048) - return (1024, 1024) # old size, new size - - def mystat(path): - if path in devs: - return devstat_ret - if path in enoent: - e = OSError("%s: does not exist" % path) - e.errno = errno.ENOENT - raise e - return real_stat(path) - - try: - opinfo = cc_growpart.device_part_info - cc_growpart.device_part_info = simple_device_part_info - os.stat = mystat - - resized = cc_growpart.resize_devices(myresizer(), devs + enoent) - - def find(name, res): - for f in res: - if f[0] == name: - return f - return None - - self.assertEqual(cc_growpart.RESIZE.NOCHANGE, - find("/dev/XXda1", resized)[1]) - self.assertEqual(cc_growpart.RESIZE.CHANGED, - find("/dev/YYda2", resized)[1]) - self.assertEqual(cc_growpart.RESIZE.SKIPPED, - find(enoent[0], resized)[1]) - # self.assertEqual(resize_calls, - # [("/dev/XXda", "1", "/dev/XXda1"), - # ("/dev/YYda", "2", "/dev/YYda2")]) - finally: - cc_growpart.device_part_info = opinfo - os.stat = real_stat - - -def simple_device_part_info(devpath): - # simple stupid return (/dev/vda, 1) for /dev/vda - ret = re.search("([^0-9]*)([0-9]*)$", devpath) - x = (ret.group(1), ret.group(2)) - return x - - -class Bunch(object): - def __init__(self, **kwds): - self.__dict__.update(kwds) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_install_hotplug.py b/tests/unittests/test_handler/test_handler_install_hotplug.py deleted file mode 100644 index 5d6b1e77..00000000 --- a/tests/unittests/test_handler/test_handler_install_hotplug.py +++ /dev/null @@ -1,113 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
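# The flow pinned down below, roughly: when the datasource reports
# EventType.HOTPLUG under EventScope.NETWORK and the event is enabled, the
# handler renders HOTPLUG_UDEV_RULES_TEMPLATE with the detected libexecdir,
# writes it to HOTPLUG_UDEV_PATH, and reloads rules via
# 'udevadm control --reload-rules'; with hotplug disabled, an existing
# rules file is deleted instead.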
-from collections import namedtuple -from unittest import mock - -import pytest - -from cloudinit.config.cc_install_hotplug import ( - handle, - HOTPLUG_UDEV_PATH, - HOTPLUG_UDEV_RULES_TEMPLATE, -) -from cloudinit.event import EventScope, EventType - - -@pytest.yield_fixture() -def mocks(): - m_update_enabled = mock.patch('cloudinit.stages.update_event_enabled') - m_write = mock.patch('cloudinit.util.write_file', autospec=True) - m_del = mock.patch('cloudinit.util.del_file', autospec=True) - m_subp = mock.patch('cloudinit.subp.subp') - m_which = mock.patch('cloudinit.subp.which', return_value=None) - m_path_exists = mock.patch('os.path.exists', return_value=False) - - yield namedtuple( - 'Mocks', - 'm_update_enabled m_write m_del m_subp m_which m_path_exists' - )( - m_update_enabled.start(), m_write.start(), m_del.start(), - m_subp.start(), m_which.start(), m_path_exists.start() - ) - - m_update_enabled.stop() - m_write.stop() - m_del.stop() - m_subp.stop() - m_which.stop() - m_path_exists.stop() - - -class TestInstallHotplug: - @pytest.mark.parametrize('libexec_exists', [True, False]) - def test_rules_installed_when_supported_and_enabled( - self, mocks, libexec_exists - ): - mocks.m_which.return_value = 'udevadm' - mocks.m_update_enabled.return_value = True - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = { - EventScope.NETWORK: {EventType.HOTPLUG} - } - - if libexec_exists: - libexecdir = "/usr/libexec/cloud-init" - else: - libexecdir = "/usr/lib/cloud-init" - with mock.patch('os.path.exists', return_value=libexec_exists): - handle(None, {}, m_cloud, mock.Mock(), None) - mocks.m_write.assert_called_once_with( - filename=HOTPLUG_UDEV_PATH, - content=HOTPLUG_UDEV_RULES_TEMPLATE.format( - libexecdir=libexecdir), - ) - assert mocks.m_subp.call_args_list == [mock.call([ - 'udevadm', 'control', '--reload-rules', - ])] - assert mocks.m_del.call_args_list == [] - - def test_rules_not_installed_when_unsupported(self, mocks): - mocks.m_update_enabled.return_value = True - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = {} - - handle(None, {}, m_cloud, mock.Mock(), None) - assert mocks.m_write.call_args_list == [] - assert mocks.m_del.call_args_list == [] - assert mocks.m_subp.call_args_list == [] - - def test_rules_not_installed_when_disabled(self, mocks): - mocks.m_update_enabled.return_value = False - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = { - EventScope.NETWORK: {EventType.HOTPLUG} - } - - handle(None, {}, m_cloud, mock.Mock(), None) - assert mocks.m_write.call_args_list == [] - assert mocks.m_del.call_args_list == [] - assert mocks.m_subp.call_args_list == [] - - def test_rules_uninstalled_when_disabled(self, mocks): - mocks.m_path_exists.return_value = True - mocks.m_update_enabled.return_value = False - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = {} - - handle(None, {}, m_cloud, mock.Mock(), None) - mocks.m_del.assert_called_with(HOTPLUG_UDEV_PATH) - assert mocks.m_subp.call_args_list == [mock.call([ - 'udevadm', 'control', '--reload-rules', - ])] - assert mocks.m_write.call_args_list == [] - - def test_rules_not_installed_when_no_udevadm(self, mocks): - mocks.m_update_enabled.return_value = True - m_cloud = mock.MagicMock() - m_cloud.datasource.get_supported_events.return_value = { - EventScope.NETWORK: {EventType.HOTPLUG} - } - - handle(None, {}, m_cloud, mock.Mock(), None) - assert mocks.m_del.call_args_list == [] - assert 
mocks.m_write.call_args_list == [] - assert mocks.m_subp.call_args_list == [] diff --git a/tests/unittests/test_handler/test_handler_landscape.py b/tests/unittests/test_handler/test_handler_landscape.py deleted file mode 100644 index 1cc73ea2..00000000 --- a/tests/unittests/test_handler/test_handler_landscape.py +++ /dev/null @@ -1,126 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -import logging -from configobj import ConfigObj - -from cloudinit.config import cc_landscape -from cloudinit import util -from cloudinit.tests.helpers import (FilesystemMockingTestCase, mock, - wrap_and_call) - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -class TestLandscape(FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestLandscape, self).setUp() - self.new_root = self.tmp_dir() - self.conf = self.tmp_path('client.conf', self.new_root) - self.default_file = self.tmp_path('default_landscape', self.new_root) - self.patchUtils(self.new_root) - self.add_patch( - 'cloudinit.distros.ubuntu.Distro.install_packages', - 'm_install_packages' - ) - - def test_handler_skips_empty_landscape_cloudconfig(self): - """Empty landscape cloud-config section does no work.""" - mycloud = get_cloud('ubuntu') - mycloud.distro = mock.MagicMock() - cfg = {'landscape': {}} - cc_landscape.handle('notimportant', cfg, mycloud, LOG, None) - self.assertFalse(mycloud.distro.install_packages.called) - - def test_handler_error_on_invalid_landscape_type(self): - """Raise an error when landscape configuraiton option is invalid.""" - mycloud = get_cloud('ubuntu') - cfg = {'landscape': 'wrongtype'} - with self.assertRaises(RuntimeError) as context_manager: - cc_landscape.handle('notimportant', cfg, mycloud, LOG, None) - self.assertIn( - "'landscape' key existed in config, but not a dict", - str(context_manager.exception)) - - @mock.patch('cloudinit.config.cc_landscape.subp') - def test_handler_restarts_landscape_client(self, m_subp): - """handler restarts lansdscape-client after install.""" - mycloud = get_cloud('ubuntu') - cfg = {'landscape': {'client': {}}} - wrap_and_call( - 'cloudinit.config.cc_landscape', - {'LSC_CLIENT_CFG_FILE': {'new': self.conf}}, - cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) - self.assertEqual( - [mock.call(['service', 'landscape-client', 'restart'])], - m_subp.subp.call_args_list) - - def test_handler_installs_client_and_creates_config_file(self): - """Write landscape client.conf and install landscape-client.""" - mycloud = get_cloud('ubuntu') - cfg = {'landscape': {'client': {}}} - expected = {'client': { - 'log_level': 'info', - 'url': 'https://landscape.canonical.com/message-system', - 'ping_url': 'http://landscape.canonical.com/ping', - 'data_path': '/var/lib/landscape/client'}} - mycloud.distro = mock.MagicMock() - wrap_and_call( - 'cloudinit.config.cc_landscape', - {'LSC_CLIENT_CFG_FILE': {'new': self.conf}, - 'LS_DEFAULT_FILE': {'new': self.default_file}}, - cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) - self.assertEqual( - [mock.call('landscape-client')], - mycloud.distro.install_packages.call_args) - self.assertEqual(expected, dict(ConfigObj(self.conf))) - self.assertIn( - 'Wrote landscape config file to {0}'.format(self.conf), - self.logs.getvalue()) - default_content = util.load_file(self.default_file) - self.assertEqual('RUN=1\n', default_content) - - def test_handler_writes_merged_client_config_file_with_defaults(self): - """Merge and write options from 
LSC_CLIENT_CFG_FILE with defaults.""" - # Write existing sparse client.conf file - util.write_file(self.conf, '[client]\ncomputer_title = My PC\n') - mycloud = get_cloud('ubuntu') - cfg = {'landscape': {'client': {}}} - expected = {'client': { - 'log_level': 'info', - 'url': 'https://landscape.canonical.com/message-system', - 'ping_url': 'http://landscape.canonical.com/ping', - 'data_path': '/var/lib/landscape/client', - 'computer_title': 'My PC'}} - wrap_and_call( - 'cloudinit.config.cc_landscape', - {'LSC_CLIENT_CFG_FILE': {'new': self.conf}}, - cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) - self.assertEqual(expected, dict(ConfigObj(self.conf))) - self.assertIn( - 'Wrote landscape config file to {0}'.format(self.conf), - self.logs.getvalue()) - - def test_handler_writes_merged_provided_cloudconfig_with_defaults(self): - """Merge and write options from cloud-config options with defaults.""" - # Write empty sparse client.conf file - util.write_file(self.conf, '') - mycloud = get_cloud('ubuntu') - cfg = {'landscape': {'client': {'computer_title': 'My PC'}}} - expected = {'client': { - 'log_level': 'info', - 'url': 'https://landscape.canonical.com/message-system', - 'ping_url': 'http://landscape.canonical.com/ping', - 'data_path': '/var/lib/landscape/client', - 'computer_title': 'My PC'}} - wrap_and_call( - 'cloudinit.config.cc_landscape', - {'LSC_CLIENT_CFG_FILE': {'new': self.conf}}, - cc_landscape.handle, 'notimportant', cfg, mycloud, LOG, None) - self.assertEqual(expected, dict(ConfigObj(self.conf))) - self.assertIn( - 'Wrote landscape config file to {0}'.format(self.conf), - self.logs.getvalue()) diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py deleted file mode 100644 index 3c17927e..00000000 --- a/tests/unittests/test_handler/test_handler_locale.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# -# Author: Juerg Haefliger -# -# This file is part of cloud-init. See LICENSE file for license information. 
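# A one-line sketch of the user-data driving these cases:
#
#   locale: en_GB.UTF-8
#
# How it is applied is distro-specific, and that is what the tests pin
# down: localectl on Arch, systemd locale.conf or RC_LANG on SLES,
# update-locale on Ubuntu, and /etc/locale.conf via update_sysconfig_file
# on RHEL.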
-import logging -import os -import shutil -import tempfile -from io import BytesIO -from configobj import ConfigObj -from unittest import mock - -from cloudinit import util -from cloudinit.config import cc_locale -from cloudinit.tests import helpers as t_help - -from tests.unittests.util import get_cloud - - -LOG = logging.getLogger(__name__) - - -class TestLocale(t_help.FilesystemMockingTestCase): - - def setUp(self): - super(TestLocale, self).setUp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - self.patchUtils(self.new_root) - - def test_set_locale_arch(self): - locale = 'en_GB.UTF-8' - locale_configfile = '/etc/invalid-locale-path' - cfg = { - 'locale': locale, - 'locale_configfile': locale_configfile, - } - cc = get_cloud('arch') - - with mock.patch('cloudinit.distros.arch.subp.subp') as m_subp: - with mock.patch('cloudinit.distros.arch.LOG.warning') as m_LOG: - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - m_LOG.assert_called_with('Invalid locale_configfile %s, ' - 'only supported value is ' - '/etc/locale.conf', - locale_configfile) - - contents = util.load_file(cc.distro.locale_gen_fn) - self.assertIn('%s UTF-8' % locale, contents) - m_subp.assert_called_with(['localectl', - 'set-locale', - locale], capture=False) - - def test_set_locale_sles(self): - - cfg = { - 'locale': 'My.Locale', - } - cc = get_cloud('sles') - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - if cc.distro.uses_systemd(): - locale_conf = cc.distro.systemd_locale_conf_fn - else: - locale_conf = cc.distro.locale_conf_fn - contents = util.load_file(locale_conf, decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - if cc.distro.uses_systemd(): - self.assertEqual({'LANG': cfg['locale']}, dict(n_cfg)) - else: - self.assertEqual({'RC_LANG': cfg['locale']}, dict(n_cfg)) - - def test_set_locale_sles_default(self): - cfg = {} - cc = get_cloud('sles') - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - - if cc.distro.uses_systemd(): - locale_conf = cc.distro.systemd_locale_conf_fn - keyname = 'LANG' - else: - locale_conf = cc.distro.locale_conf_fn - keyname = 'RC_LANG' - - contents = util.load_file(locale_conf, decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual({keyname: 'en_US.UTF-8'}, dict(n_cfg)) - - def test_locale_update_config_if_different_than_default(self): - """Test cc_locale writes updates conf if different than default""" - locale_conf = os.path.join(self.new_root, "etc/default/locale") - util.write_file(locale_conf, 'LANG="en_US.UTF-8"\n') - cfg = {'locale': 'C.UTF-8'} - cc = get_cloud('ubuntu') - with mock.patch('cloudinit.distros.debian.subp.subp') as m_subp: - with mock.patch('cloudinit.distros.debian.LOCALE_CONF_FN', - locale_conf): - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - m_subp.assert_called_with(['update-locale', - '--locale-file=%s' % locale_conf, - 'LANG=C.UTF-8'], capture=False) - - def test_locale_rhel_defaults_en_us_utf8(self): - """Test cc_locale gets en_US.UTF-8 from distro get_locale fallback""" - cfg = {} - cc = get_cloud('rhel') - update_sysconfig = 'cloudinit.distros.rhel_util.update_sysconfig_file' - with mock.patch.object(cc.distro, 'uses_systemd') as m_use_sd: - m_use_sd.return_value = True - with mock.patch(update_sysconfig) as m_update_syscfg: - cc_locale.handle('cc_locale', cfg, cc, LOG, []) - m_update_syscfg.assert_called_with('/etc/locale.conf', - {'LANG': 'en_US.UTF-8'}) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_lxd.py 
b/tests/unittests/test_handler/test_handler_lxd.py deleted file mode 100644 index ea8b6e90..00000000 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ /dev/null @@ -1,222 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. -from unittest import mock - -from cloudinit.config import cc_lxd -from cloudinit.tests import helpers as t_help - -from tests.unittests.util import get_cloud - - -class TestLxd(t_help.CiTestCase): - - with_logs = True - - lxd_cfg = { - 'lxd': { - 'init': { - 'network_address': '0.0.0.0', - 'storage_backend': 'zfs', - 'storage_pool': 'poolname', - } - } - } - - @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") - @mock.patch("cloudinit.config.cc_lxd.subp") - def test_lxd_init(self, mock_subp, m_maybe_clean): - cc = get_cloud() - mock_subp.which.return_value = True - m_maybe_clean.return_value = None - cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) - self.assertTrue(mock_subp.which.called) - # no bridge config, so maybe_cleanup should not be called. - self.assertFalse(m_maybe_clean.called) - self.assertEqual( - [mock.call(['lxd', 'waitready', '--timeout=300']), - mock.call( - ['lxd', 'init', '--auto', '--network-address=0.0.0.0', - '--storage-backend=zfs', '--storage-pool=poolname'])], - mock_subp.subp.call_args_list) - - @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") - @mock.patch("cloudinit.config.cc_lxd.subp") - def test_lxd_install(self, mock_subp, m_maybe_clean): - cc = get_cloud() - cc.distro = mock.MagicMock() - mock_subp.which.return_value = None - cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) - self.assertNotIn('WARN', self.logs.getvalue()) - self.assertTrue(cc.distro.install_packages.called) - cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, self.logger, []) - self.assertFalse(m_maybe_clean.called) - install_pkg = cc.distro.install_packages.call_args_list[0][0][0] - self.assertEqual(sorted(install_pkg), ['lxd', 'zfsutils-linux']) - - @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") - @mock.patch("cloudinit.config.cc_lxd.subp") - def test_no_init_does_nothing(self, mock_subp, m_maybe_clean): - cc = get_cloud() - cc.distro = mock.MagicMock() - cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, self.logger, []) - self.assertFalse(cc.distro.install_packages.called) - self.assertFalse(mock_subp.subp.called) - self.assertFalse(m_maybe_clean.called) - - @mock.patch("cloudinit.config.cc_lxd.maybe_cleanup_default") - @mock.patch("cloudinit.config.cc_lxd.subp") - def test_no_lxd_does_nothing(self, mock_subp, m_maybe_clean): - cc = get_cloud() - cc.distro = mock.MagicMock() - cc_lxd.handle('cc_lxd', {'package_update': True}, cc, self.logger, []) - self.assertFalse(cc.distro.install_packages.called) - self.assertFalse(mock_subp.subp.called) - self.assertFalse(m_maybe_clean.called) - - def test_lxd_debconf_new_full(self): - data = {"mode": "new", - "name": "testbr0", - "ipv4_address": "10.0.8.1", - "ipv4_netmask": "24", - "ipv4_dhcp_first": "10.0.8.2", - "ipv4_dhcp_last": "10.0.8.254", - "ipv4_dhcp_leases": "250", - "ipv4_nat": "true", - "ipv6_address": "fd98:9e0:3744::1", - "ipv6_netmask": "64", - "ipv6_nat": "true", - "domain": "lxd"} - self.assertEqual( - cc_lxd.bridge_to_debconf(data), - {"lxd/setup-bridge": "true", - "lxd/bridge-name": "testbr0", - "lxd/bridge-ipv4": "true", - "lxd/bridge-ipv4-address": "10.0.8.1", - "lxd/bridge-ipv4-netmask": "24", - "lxd/bridge-ipv4-dhcp-first": "10.0.8.2", - "lxd/bridge-ipv4-dhcp-last": "10.0.8.254", - "lxd/bridge-ipv4-dhcp-leases": "250", - 
"lxd/bridge-ipv4-nat": "true", - "lxd/bridge-ipv6": "true", - "lxd/bridge-ipv6-address": "fd98:9e0:3744::1", - "lxd/bridge-ipv6-netmask": "64", - "lxd/bridge-ipv6-nat": "true", - "lxd/bridge-domain": "lxd"}) - - def test_lxd_debconf_new_partial(self): - data = {"mode": "new", - "ipv6_address": "fd98:9e0:3744::1", - "ipv6_netmask": "64", - "ipv6_nat": "true"} - self.assertEqual( - cc_lxd.bridge_to_debconf(data), - {"lxd/setup-bridge": "true", - "lxd/bridge-ipv6": "true", - "lxd/bridge-ipv6-address": "fd98:9e0:3744::1", - "lxd/bridge-ipv6-netmask": "64", - "lxd/bridge-ipv6-nat": "true"}) - - def test_lxd_debconf_existing(self): - data = {"mode": "existing", - "name": "testbr0"} - self.assertEqual( - cc_lxd.bridge_to_debconf(data), - {"lxd/setup-bridge": "false", - "lxd/use-existing-bridge": "true", - "lxd/bridge-name": "testbr0"}) - - def test_lxd_debconf_none(self): - data = {"mode": "none"} - self.assertEqual( - cc_lxd.bridge_to_debconf(data), - {"lxd/setup-bridge": "false", - "lxd/bridge-name": ""}) - - def test_lxd_cmd_new_full(self): - data = {"mode": "new", - "name": "testbr0", - "ipv4_address": "10.0.8.1", - "ipv4_netmask": "24", - "ipv4_dhcp_first": "10.0.8.2", - "ipv4_dhcp_last": "10.0.8.254", - "ipv4_dhcp_leases": "250", - "ipv4_nat": "true", - "ipv6_address": "fd98:9e0:3744::1", - "ipv6_netmask": "64", - "ipv6_nat": "true", - "domain": "lxd"} - self.assertEqual( - cc_lxd.bridge_to_cmd(data), - (["network", "create", "testbr0", - "ipv4.address=10.0.8.1/24", "ipv4.nat=true", - "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254", - "ipv6.address=fd98:9e0:3744::1/64", - "ipv6.nat=true", "dns.domain=lxd"], - ["network", "attach-profile", - "testbr0", "default", "eth0"])) - - def test_lxd_cmd_new_partial(self): - data = {"mode": "new", - "ipv6_address": "fd98:9e0:3744::1", - "ipv6_netmask": "64", - "ipv6_nat": "true"} - self.assertEqual( - cc_lxd.bridge_to_cmd(data), - (["network", "create", "lxdbr0", "ipv4.address=none", - "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true"], - ["network", "attach-profile", - "lxdbr0", "default", "eth0"])) - - def test_lxd_cmd_existing(self): - data = {"mode": "existing", - "name": "testbr0"} - self.assertEqual( - cc_lxd.bridge_to_cmd(data), - (None, ["network", "attach-profile", - "testbr0", "default", "eth0"])) - - def test_lxd_cmd_none(self): - data = {"mode": "none"} - self.assertEqual( - cc_lxd.bridge_to_cmd(data), - (None, None)) - - -class TestLxdMaybeCleanupDefault(t_help.CiTestCase): - """Test the implementation of maybe_cleanup_default.""" - - defnet = cc_lxd._DEFAULT_NETWORK_NAME - - @mock.patch("cloudinit.config.cc_lxd._lxc") - def test_network_other_than_default_not_deleted(self, m_lxc): - """deletion or removal should only occur if bridge is default.""" - cc_lxd.maybe_cleanup_default( - net_name="lxdbr1", did_init=True, create=True, attach=True) - m_lxc.assert_not_called() - - @mock.patch("cloudinit.config.cc_lxd._lxc") - def test_did_init_false_does_not_delete(self, m_lxc): - """deletion or removal should only occur if did_init is True.""" - cc_lxd.maybe_cleanup_default( - net_name=self.defnet, did_init=False, create=True, attach=True) - m_lxc.assert_not_called() - - @mock.patch("cloudinit.config.cc_lxd._lxc") - def test_network_deleted_if_create_true(self, m_lxc): - """deletion of network should occur if create is True.""" - cc_lxd.maybe_cleanup_default( - net_name=self.defnet, did_init=True, create=True, attach=False) - m_lxc.assert_called_with(["network", "delete", self.defnet]) - - @mock.patch("cloudinit.config.cc_lxd._lxc") - def 
test_device_removed_if_attach_true(self, m_lxc):
-        """removal of the nic device should occur if attach is True."""
-        nic_name = "my_nic"
-        profile = "my_profile"
-        cc_lxd.maybe_cleanup_default(
-            net_name=self.defnet, did_init=True, create=False, attach=True,
-            profile=profile, nic_name=nic_name)
-        m_lxc.assert_called_once_with(
-            ["profile", "device", "remove", profile, nic_name])
-
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_mcollective.py b/tests/unittests/test_handler/test_handler_mcollective.py
deleted file mode 100644
index 9cda6fbe..00000000
--- a/tests/unittests/test_handler/test_handler_mcollective.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-import configobj
-import logging
-import os
-import shutil
-import tempfile
-from io import BytesIO
-
-from cloudinit import (util)
-from cloudinit.config import cc_mcollective
-from cloudinit.tests import helpers as t_help
-
-from tests.unittests.util import get_cloud
-
-LOG = logging.getLogger(__name__)
-
-
-STOCK_CONFIG = """\
-main_collective = mcollective
-collectives = mcollective
-libdir = /usr/share/mcollective/plugins
-logfile = /var/log/mcollective.log
-loglevel = info
-daemonize = 1
-
-# Plugins
-securityprovider = psk
-plugin.psk = unset
-
-connector = activemq
-plugin.activemq.pool.size = 1
-plugin.activemq.pool.1.host = stomp1
-plugin.activemq.pool.1.port = 61613
-plugin.activemq.pool.1.user = mcollective
-plugin.activemq.pool.1.password = marionette
-
-# Facts
-factsource = yaml
-plugin.yaml = /etc/mcollective/facts.yaml
-"""
-
-
-class TestConfig(t_help.FilesystemMockingTestCase):
-    def setUp(self):
-        super(TestConfig, self).setUp()
-        self.tmp = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, self.tmp)
-        # "./": make os.path.join behave correctly with abs path as second arg
-        self.server_cfg = os.path.join(
-            self.tmp, "./" + cc_mcollective.SERVER_CFG)
-        self.pubcert_file = os.path.join(
-            self.tmp, "./" + cc_mcollective.PUBCERT_FILE)
-        self.pricert_file = os.path.join(
-            self.tmp, "./" + cc_mcollective.PRICERT_FILE)
-
-    def test_basic_config(self):
-        cfg = {
-            'mcollective': {
-                'conf': {
-                    'loglevel': 'debug',
-                    'connector': 'rabbitmq',
-                    'logfile': '/var/log/mcollective.log',
-                    'ttl': '4294957',
-                    'collectives': 'mcollective',
-                    'main_collective': 'mcollective',
-                    'securityprovider': 'psk',
-                    'daemonize': '1',
-                    'factsource': 'yaml',
-                    'direct_addressing': '1',
-                    'plugin.psk': 'unset',
-                    'libdir': '/usr/share/mcollective/plugins',
-                    'identity': '1',
-                },
-            },
-        }
-        expected = cfg['mcollective']['conf']
-
-        self.patchUtils(self.tmp)
-        cc_mcollective.configure(cfg['mcollective']['conf'])
-        contents = util.load_file(cc_mcollective.SERVER_CFG, decode=False)
-        contents = configobj.ConfigObj(BytesIO(contents))
-        self.assertEqual(expected, dict(contents))
-
-    def test_existing_config_is_saved(self):
-        cfg = {'loglevel': 'warn'}
-        util.write_file(self.server_cfg, STOCK_CONFIG)
-        cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
-        self.assertTrue(os.path.exists(self.server_cfg))
-        self.assertTrue(os.path.exists(self.server_cfg + ".old"))
-        self.assertEqual(util.load_file(self.server_cfg + ".old"),
-                         STOCK_CONFIG)
-
-    def test_existing_updated(self):
-        cfg = {'loglevel': 'warn'}
-        util.write_file(self.server_cfg, STOCK_CONFIG)
-        cc_mcollective.configure(config=cfg, server_cfg=self.server_cfg)
-        cfgobj = configobj.ConfigObj(self.server_cfg)
-        self.assertEqual(cfg['loglevel'], cfgobj['loglevel'])
-
-    def 
test_certificates_written(self):
-        # check public-cert and private-cert keys in config get written
-        cfg = {'loglevel': 'debug',
-               'public-cert': "this is my public-certificate",
-               'private-cert': "secret private certificate"}
-
-        cc_mcollective.configure(
-            config=cfg, server_cfg=self.server_cfg,
-            pricert_file=self.pricert_file, pubcert_file=self.pubcert_file)
-
-        found = configobj.ConfigObj(self.server_cfg)
-
-        # make sure these didn't get written in
-        self.assertFalse('public-cert' in found)
-        self.assertFalse('private-cert' in found)
-
-        # these need updating to the specified paths
-        self.assertEqual(found['plugin.ssl_server_public'], self.pubcert_file)
-        self.assertEqual(found['plugin.ssl_server_private'],
-                         self.pricert_file)
-
-        # and the security provider should be ssl
-        self.assertEqual(found['securityprovider'], 'ssl')
-
-        self.assertEqual(
-            util.load_file(self.pricert_file), cfg['private-cert'])
-        self.assertEqual(
-            util.load_file(self.pubcert_file), cfg['public-cert'])
-
-
-class TestHandler(t_help.TestCase):
-    @t_help.mock.patch("cloudinit.config.cc_mcollective.subp")
-    @t_help.mock.patch("cloudinit.config.cc_mcollective.util")
-    def test_mcollective_install(self, mock_util, mock_subp):
-        cc = get_cloud()
-        cc.distro = t_help.mock.MagicMock()
-        mock_util.load_file.return_value = b""
-        mycfg = {'mcollective': {'conf': {'loglevel': 'debug'}}}
-        cc_mcollective.handle('cc_mcollective', mycfg, cc, LOG, [])
-        self.assertTrue(cc.distro.install_packages.called)
-        install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
-        self.assertEqual(install_pkg, ('mcollective',))
-
-        self.assertTrue(mock_subp.subp.called)
-        self.assertEqual(mock_subp.subp.call_args_list[0][0][0],
-                         ['service', 'mcollective', 'restart'])
-
-# vi: ts=4 expandtab
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
deleted file mode 100644
index 69e8b30d..00000000
--- a/tests/unittests/test_handler/test_handler_mounts.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
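
The mcollective tests above all rely on the same configobj round-trip: configure() writes flat key=value pairs to the server config file, and the assertions re-parse that file back into a dict. A minimal standalone sketch of that round-trip, using only configobj and illustrative keys and values (none of the names below come from the patch):

    import tempfile
    from io import BytesIO

    import configobj

    # Write a flat key=value config the way configure() does, then parse
    # it back with ConfigObj(BytesIO(...)) as the deleted tests did.
    cfg = {'loglevel': 'debug', 'securityprovider': 'psk'}

    with tempfile.NamedTemporaryFile(suffix='.cfg') as tmp:
        conf = configobj.ConfigObj()
        conf.update(cfg)
        conf.filename = tmp.name
        conf.write()
        tmp.seek(0)
        parsed = configobj.ConfigObj(BytesIO(tmp.read()))

    # configobj parses everything back as strings, so the dicts compare equal.
    assert dict(parsed) == cfg
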
- -import os.path -from unittest import mock - -from cloudinit.config import cc_mounts - -from cloudinit.tests import helpers as test_helpers - - -class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase): - - def setUp(self): - super(TestSanitizeDevname, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - - def _touch(self, path): - path = os.path.join(self.new_root, path.lstrip('/')) - basedir = os.path.dirname(path) - if not os.path.exists(basedir): - os.makedirs(basedir) - open(path, 'a').close() - - def _makedirs(self, directory): - directory = os.path.join(self.new_root, directory.lstrip('/')) - if not os.path.exists(directory): - os.makedirs(directory) - - def mock_existence_of_disk(self, disk_path): - self._touch(disk_path) - self._makedirs(os.path.join('/sys/block', disk_path.split('/')[-1])) - - def mock_existence_of_partition(self, disk_path, partition_number): - self.mock_existence_of_disk(disk_path) - self._touch(disk_path + str(partition_number)) - disk_name = disk_path.split('/')[-1] - self._makedirs(os.path.join('/sys/block', - disk_name, - disk_name + str(partition_number))) - - def test_existent_full_disk_path_is_returned(self): - disk_path = '/dev/sda' - self.mock_existence_of_disk(disk_path) - self.assertEqual(disk_path, - cc_mounts.sanitize_devname(disk_path, - lambda x: None, - mock.Mock())) - - def test_existent_disk_name_returns_full_path(self): - disk_name = 'sda' - disk_path = '/dev/' + disk_name - self.mock_existence_of_disk(disk_path) - self.assertEqual(disk_path, - cc_mounts.sanitize_devname(disk_name, - lambda x: None, - mock.Mock())) - - def test_existent_meta_disk_is_returned(self): - actual_disk_path = '/dev/sda' - self.mock_existence_of_disk(actual_disk_path) - self.assertEqual( - actual_disk_path, - cc_mounts.sanitize_devname('ephemeral0', - lambda x: actual_disk_path, - mock.Mock())) - - def test_existent_meta_partition_is_returned(self): - disk_name, partition_part = '/dev/sda', '1' - actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname('ephemeral0.1', - lambda x: disk_name, - mock.Mock())) - - def test_existent_meta_partition_with_p_is_returned(self): - disk_name, partition_part = '/dev/sda', 'p1' - actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname('ephemeral0.1', - lambda x: disk_name, - mock.Mock())) - - def test_first_partition_returned_if_existent_disk_is_partitioned(self): - disk_name, partition_part = '/dev/sda', '1' - actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname('ephemeral0', - lambda x: disk_name, - mock.Mock())) - - def test_nth_partition_returned_if_requested(self): - disk_name, partition_part = '/dev/sda', '3' - actual_partition_path = disk_name + partition_part - self.mock_existence_of_partition(disk_name, partition_part) - self.assertEqual( - actual_partition_path, - cc_mounts.sanitize_devname('ephemeral0.3', - lambda x: disk_name, - mock.Mock())) - - def test_transformer_returning_none_returns_none(self): - self.assertIsNone( - cc_mounts.sanitize_devname( - 'ephemeral0', lambda x: None, mock.Mock())) - - def test_missing_device_returns_none(self): - self.assertIsNone( - 
cc_mounts.sanitize_devname('/dev/sda', None, mock.Mock())) - - def test_missing_sys_returns_none(self): - disk_path = '/dev/sda' - self._makedirs(disk_path) - self.assertIsNone( - cc_mounts.sanitize_devname(disk_path, None, mock.Mock())) - - def test_existent_disk_but_missing_partition_returns_none(self): - disk_path = '/dev/sda' - self.mock_existence_of_disk(disk_path) - self.assertIsNone( - cc_mounts.sanitize_devname( - 'ephemeral0.1', lambda x: disk_path, mock.Mock())) - - def test_network_device_returns_network_device(self): - disk_path = 'netdevice:/path' - self.assertEqual( - disk_path, - cc_mounts.sanitize_devname(disk_path, None, mock.Mock())) - - def test_device_aliases_remapping(self): - disk_path = '/dev/sda' - self.mock_existence_of_disk(disk_path) - self.assertEqual(disk_path, - cc_mounts.sanitize_devname('mydata', - lambda x: None, - mock.Mock(), - {'mydata': disk_path})) - - -class TestSwapFileCreation(test_helpers.FilesystemMockingTestCase): - - def setUp(self): - super(TestSwapFileCreation, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - - self.fstab_path = os.path.join(self.new_root, 'etc/fstab') - self.swap_path = os.path.join(self.new_root, 'swap.img') - self._makedirs('/etc') - - self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH', - 'mock_fstab_path', - self.fstab_path, - autospec=False) - - self.add_patch('cloudinit.config.cc_mounts.subp.subp', - 'm_subp_subp') - - self.add_patch('cloudinit.config.cc_mounts.util.mounts', - 'mock_util_mounts', - return_value={ - '/dev/sda1': {'fstype': 'ext4', - 'mountpoint': '/', - 'opts': 'rw,relatime,discard' - }}) - - self.mock_cloud = mock.Mock() - self.mock_log = mock.Mock() - self.mock_cloud.device_name_to_device = self.device_name_to_device - - self.cc = { - 'swap': { - 'filename': self.swap_path, - 'size': '512', - 'maxsize': '512'}} - - def _makedirs(self, directory): - directory = os.path.join(self.new_root, directory.lstrip('/')) - if not os.path.exists(directory): - os.makedirs(directory) - - def device_name_to_device(self, path): - if path == 'swap': - return self.swap_path - else: - dev = None - - return dev - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_fallocate_on_xfs(self, m_kernel_version, - m_get_mount_info): - m_kernel_version.return_value = (4, 20) - m_get_mount_info.return_value = ["", "xfs"] - - cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_xfs(self, m_kernel_version, - m_get_mount_info): - m_kernel_version.return_value = (3, 18) - m_get_mount_info.return_value = ["", "xfs"] - - cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['dd', 'if=/dev/zero', - 'of=' + self.swap_path, - 'bs=1M', 'count=0'], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_btrfs(self, m_kernel_version, - m_get_mount_info): - m_kernel_version.return_value = (4, 20) - m_get_mount_info.return_value = ["", "btrfs"] - - cc_mounts.handle(None, self.cc, self.mock_cloud, 
self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['dd', 'if=/dev/zero', - 'of=' + self.swap_path, - 'bs=1M', 'count=0'], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.kernel_version') - def test_swap_creation_method_ext4(self, m_kernel_version, - m_get_mount_info): - m_kernel_version.return_value = (5, 14) - m_get_mount_info.return_value = ["", "ext4"] - - cc_mounts.handle(None, self.cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['fallocate', '-l', '0M', self.swap_path], capture=True), - mock.call(['mkswap', self.swap_path]), - mock.call(['swapon', '-a'])]) - - -class TestFstabHandling(test_helpers.FilesystemMockingTestCase): - - swap_path = '/dev/sdb1' - - def setUp(self): - super(TestFstabHandling, self).setUp() - self.new_root = self.tmp_dir() - self.patchOS(self.new_root) - - self.fstab_path = os.path.join(self.new_root, 'etc/fstab') - self._makedirs('/etc') - - self.add_patch('cloudinit.config.cc_mounts.FSTAB_PATH', - 'mock_fstab_path', - self.fstab_path, - autospec=False) - - self.add_patch('cloudinit.config.cc_mounts._is_block_device', - 'mock_is_block_device', - return_value=True) - - self.add_patch('cloudinit.config.cc_mounts.subp.subp', - 'm_subp_subp') - - self.add_patch('cloudinit.config.cc_mounts.util.mounts', - 'mock_util_mounts', - return_value={ - '/dev/sda1': {'fstype': 'ext4', - 'mountpoint': '/', - 'opts': 'rw,relatime,discard' - }}) - - self.mock_cloud = mock.Mock() - self.mock_log = mock.Mock() - self.mock_cloud.device_name_to_device = self.device_name_to_device - - def _makedirs(self, directory): - directory = os.path.join(self.new_root, directory.lstrip('/')) - if not os.path.exists(directory): - os.makedirs(directory) - - def device_name_to_device(self, path): - if path == 'swap': - return self.swap_path - else: - dev = None - - return dev - - def test_no_fstab(self): - """ Handle images which do not include an fstab. """ - self.assertFalse(os.path.exists(cc_mounts.FSTAB_PATH)) - fstab_expected_content = ( - '%s\tnone\tswap\tsw,comment=cloudconfig\t' - '0\t0\n' % (self.swap_path,) - ) - cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) - with open(cc_mounts.FSTAB_PATH, 'r') as fd: - fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) - - def test_swap_integrity(self): - '''Ensure that the swap file is correctly created and can - swapon successfully. 
Fixing the corner case of: - kernel: swapon: swapfile has holes''' - - fstab = '/swap.img swap swap defaults 0 0\n' - - with open(cc_mounts.FSTAB_PATH, 'w') as fd: - fd.write(fstab) - cc = {'swap': ['filename: /swap.img', 'size: 512', 'maxsize: 512']} - cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) - - def test_fstab_no_swap_device(self): - '''Ensure that cloud-init adds a discovered swap partition - to /etc/fstab.''' - - fstab_original_content = '' - fstab_expected_content = ( - '%s\tnone\tswap\tsw,comment=cloudconfig\t' - '0\t0\n' % (self.swap_path,) - ) - - with open(cc_mounts.FSTAB_PATH, 'w') as fd: - fd.write(fstab_original_content) - - cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) - - with open(cc_mounts.FSTAB_PATH, 'r') as fd: - fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) - - def test_fstab_same_swap_device_already_configured(self): - '''Ensure that cloud-init will not add a swap device if the same - device already exists in /etc/fstab.''' - - fstab_original_content = '%s swap swap defaults 0 0\n' % ( - self.swap_path,) - fstab_expected_content = fstab_original_content - - with open(cc_mounts.FSTAB_PATH, 'w') as fd: - fd.write(fstab_original_content) - - cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) - - with open(cc_mounts.FSTAB_PATH, 'r') as fd: - fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) - - def test_fstab_alternate_swap_device_already_configured(self): - '''Ensure that cloud-init will add a discovered swap device to - /etc/fstab even when there exists a swap definition on another - device.''' - - fstab_original_content = '/dev/sdc1 swap swap defaults 0 0\n' - fstab_expected_content = ( - fstab_original_content + - '%s\tnone\tswap\tsw,comment=cloudconfig\t' - '0\t0\n' % (self.swap_path,) - ) - - with open(cc_mounts.FSTAB_PATH, 'w') as fd: - fd.write(fstab_original_content) - - cc_mounts.handle(None, {}, self.mock_cloud, self.mock_log, []) - - with open(cc_mounts.FSTAB_PATH, 'r') as fd: - fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) - - def test_no_change_fstab_sets_needs_mount_all(self): - '''verify unchanged fstab entries are mounted if not call mount -a''' - fstab_original_content = ( - 'LABEL=cloudimg-rootfs / ext4 defaults 0 0\n' - 'LABEL=UEFI /boot/efi vfat defaults 0 0\n' - '/dev/vdb /mnt auto defaults,noexec,comment=cloudconfig 0 2\n' - ) - fstab_expected_content = fstab_original_content - cc = { - 'mounts': [ - ['/dev/vdb', '/mnt', 'auto', 'defaults,noexec'] - ] - } - with open(cc_mounts.FSTAB_PATH, 'w') as fd: - fd.write(fstab_original_content) - with open(cc_mounts.FSTAB_PATH, 'r') as fd: - fstab_new_content = fd.read() - self.assertEqual(fstab_expected_content, fstab_new_content) - cc_mounts.handle(None, cc, self.mock_cloud, self.mock_log, []) - self.m_subp_subp.assert_has_calls([ - mock.call(['mount', '-a']), - mock.call(['systemctl', 'daemon-reload'])]) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py deleted file mode 100644 index b34a18cb..00000000 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ /dev/null @@ -1,765 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
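
Every fstab assertion in the mounts tests above expects the swap entry cloud-init generates to take exactly one shape. A tiny illustrative formatter for that line (the helper name is made up; only the format string is taken from the assertions):

    # cloud-init tags the lines it writes with comment=cloudconfig so a
    # re-run can recognize entries it already owns and stay idempotent.
    def swap_fstab_line(device):
        return '%s\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n' % device

    assert swap_fstab_line('/dev/sdb1') == (
        '/dev/sdb1\tnone\tswap\tsw,comment=cloudconfig\t0\t0\n')
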
-import copy -import os -import shutil -from functools import partial -from os.path import dirname - -from cloudinit import (helpers, util) -from cloudinit.config import cc_ntp -from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) - -from tests.unittests.util import get_cloud - - -NTP_TEMPLATE = """\ -## template: jinja -servers {{servers}} -pools {{pools}} -""" - -TIMESYNCD_TEMPLATE = """\ -## template:jinja -[Time] -{% if servers or pools -%} -NTP={% for host in servers|list + pools|list %}{{ host }} {% endfor -%} -{% endif -%} -""" - - -class TestNtp(FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestNtp, self).setUp() - self.new_root = self.tmp_dir() - self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') - self.m_snappy.return_value = False - self.new_root = self.reRoot() - self._get_cloud = partial( - get_cloud, - paths=helpers.Paths({'templates_dir': self.new_root}) - ) - - def _get_template_path(self, template_name, distro, basepath=None): - # ntp.conf.{distro} -> ntp.conf.debian.tmpl - template_fn = '{0}.tmpl'.format( - template_name.replace('{distro}', distro)) - if not basepath: - basepath = self.new_root - path = os.path.join(basepath, template_fn) - return path - - def _generate_template(self, template=None): - if not template: - template = NTP_TEMPLATE - confpath = os.path.join(self.new_root, 'client.conf') - template_fn = os.path.join(self.new_root, 'client.conf.tmpl') - util.write_file(template_fn, content=template) - return (confpath, template_fn) - - def _mock_ntp_client_config(self, client=None, distro=None): - if not client: - client = 'ntp' - if not distro: - distro = 'ubuntu' - dcfg = cc_ntp.distro_ntp_client_configs(distro) - if client == 'systemd-timesyncd': - template = TIMESYNCD_TEMPLATE - else: - template = NTP_TEMPLATE - (confpath, _template_fn) = self._generate_template(template=template) - ntpconfig = copy.deepcopy(dcfg[client]) - ntpconfig['confpath'] = confpath - ntpconfig['template_name'] = os.path.basename(confpath) - return ntpconfig - - @mock.patch("cloudinit.config.cc_ntp.subp") - def test_ntp_install(self, mock_subp): - """ntp_install_client runs install_func when check_exe is absent.""" - mock_subp.which.return_value = None # check_exe not found. - install_func = mock.MagicMock() - cc_ntp.install_ntp_client(install_func, - packages=['ntpx'], check_exe='ntpdx') - mock_subp.which.assert_called_with('ntpdx') - install_func.assert_called_once_with(['ntpx']) - - @mock.patch("cloudinit.config.cc_ntp.subp") - def test_ntp_install_not_needed(self, mock_subp): - """ntp_install_client doesn't install when check_exe is found.""" - client = 'chrony' - mock_subp.which.return_value = [client] # check_exe found. 
- install_func = mock.MagicMock() - cc_ntp.install_ntp_client(install_func, packages=[client], - check_exe=client) - install_func.assert_not_called() - - @mock.patch("cloudinit.config.cc_ntp.subp") - def test_ntp_install_no_op_with_empty_pkg_list(self, mock_subp): - """ntp_install_client runs install_func with empty list""" - mock_subp.which.return_value = None # check_exe not found - install_func = mock.MagicMock() - cc_ntp.install_ntp_client(install_func, packages=[], - check_exe='timesyncd') - install_func.assert_called_once_with([]) - - def test_ntp_rename_ntp_conf(self): - """When NTP_CONF exists, rename_ntp moves it.""" - ntpconf = self.tmp_path("ntp.conf", self.new_root) - util.write_file(ntpconf, "") - cc_ntp.rename_ntp_conf(confpath=ntpconf) - self.assertFalse(os.path.exists(ntpconf)) - self.assertTrue(os.path.exists("{0}.dist".format(ntpconf))) - - def test_ntp_rename_ntp_conf_skip_missing(self): - """When NTP_CONF doesn't exist rename_ntp doesn't create a file.""" - ntpconf = self.tmp_path("ntp.conf", self.new_root) - self.assertFalse(os.path.exists(ntpconf)) - cc_ntp.rename_ntp_conf(confpath=ntpconf) - self.assertFalse(os.path.exists("{0}.dist".format(ntpconf))) - self.assertFalse(os.path.exists(ntpconf)) - - def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): - """write_ntp_config_template reads from $client.conf.distro.tmpl""" - servers = [] - pools = ['10.0.0.1', '10.0.0.2'] - (confpath, template_fn) = self._generate_template() - mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' - with mock.patch(mock_path, self.new_root): - cc_ntp.write_ntp_config_template('ubuntu', - servers=servers, pools=pools, - path=confpath, - template_fn=template_fn, - template=None) - self.assertEqual( - "servers []\npools ['10.0.0.1', '10.0.0.2']\n", - util.load_file(confpath)) - - def test_write_ntp_config_template_defaults_pools_w_empty_lists(self): - """write_ntp_config_template defaults pools servers upon empty config. - - When both pools and servers are empty, default NR_POOL_SERVERS get - configured. - """ - distro = 'ubuntu' - pools = cc_ntp.generate_server_names(distro) - servers = [] - (confpath, template_fn) = self._generate_template() - mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' - with mock.patch(mock_path, self.new_root): - cc_ntp.write_ntp_config_template(distro, - servers=servers, pools=pools, - path=confpath, - template_fn=template_fn, - template=None) - self.assertEqual( - "servers []\npools {0}\n".format(pools), - util.load_file(confpath)) - - def test_defaults_pools_empty_lists_sles(self): - """write_ntp_config_template defaults opensuse pools upon empty config. - - When both pools and servers are empty, default NR_POOL_SERVERS get - configured. 
- """ - distro = 'sles' - default_pools = cc_ntp.generate_server_names(distro) - (confpath, template_fn) = self._generate_template() - - cc_ntp.write_ntp_config_template(distro, - servers=[], pools=[], - path=confpath, - template_fn=template_fn, - template=None) - for pool in default_pools: - self.assertIn('opensuse', pool) - self.assertEqual( - "servers []\npools {0}\n".format(default_pools), - util.load_file(confpath)) - self.assertIn( - "Adding distro default ntp pool servers: {0}".format( - ",".join(default_pools)), - self.logs.getvalue()) - - def test_timesyncd_template(self): - """Test timesycnd template is correct""" - pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] - servers = ['192.168.23.3', '192.168.23.4'] - (confpath, template_fn) = self._generate_template( - template=TIMESYNCD_TEMPLATE) - cc_ntp.write_ntp_config_template('ubuntu', - servers=servers, pools=pools, - path=confpath, - template_fn=template_fn, - template=None) - self.assertEqual( - "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)), - util.load_file(confpath)) - - def test_distro_ntp_client_configs(self): - """Test we have updated ntp client configs on different distros""" - delta = copy.deepcopy(cc_ntp.DISTRO_CLIENT_CONFIG) - base = copy.deepcopy(cc_ntp.NTP_CLIENT_CONFIG) - # confirm no-delta distros match the base config - for distro in cc_ntp.distros: - if distro not in delta: - result = cc_ntp.distro_ntp_client_configs(distro) - self.assertEqual(base, result) - # for distros with delta, ensure the merged config values match - # what is set in the delta - for distro in delta.keys(): - result = cc_ntp.distro_ntp_client_configs(distro) - for client in delta[distro].keys(): - for key in delta[distro][client].keys(): - self.assertEqual(delta[distro][client][key], - result[client][key]) - - def _get_expected_pools(self, pools, distro, client): - if client in ['ntp', 'chrony']: - if client == 'ntp' and distro == 'alpine': - # NTP for Alpine Linux is Busybox's ntp which does not - # support 'pool' lines in its configuration file. - expected_pools = [] - else: - expected_pools = [ - 'pool {0} iburst'.format(pool) for pool in pools] - elif client == 'systemd-timesyncd': - expected_pools = " ".join(pools) - - return expected_pools - - def _get_expected_servers(self, servers, distro, client): - if client in ['ntp', 'chrony']: - if client == 'ntp' and distro == 'alpine': - # NTP for Alpine Linux is Busybox's ntp which only supports - # 'server' lines without iburst option. 
- expected_servers = [ - 'server {0}'.format(srv) for srv in servers] - else: - expected_servers = [ - 'server {0} iburst'.format(srv) for srv in servers] - elif client == 'systemd-timesyncd': - expected_servers = " ".join(servers) - - return expected_servers - - def test_ntp_handler_real_distro_ntp_templates(self): - """Test ntp handler renders the shipped distro ntp client templates.""" - pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] - servers = ['192.168.23.3', '192.168.23.4'] - for client in ['ntp', 'systemd-timesyncd', 'chrony']: - for distro in cc_ntp.distros: - distro_cfg = cc_ntp.distro_ntp_client_configs(distro) - ntpclient = distro_cfg[client] - confpath = ( - os.path.join(self.new_root, ntpclient.get('confpath')[1:])) - template = ntpclient.get('template_name') - # find sourcetree template file - root_dir = ( - dirname(dirname(os.path.realpath(util.__file__))) + - '/templates') - source_fn = self._get_template_path(template, distro, - basepath=root_dir) - template_fn = self._get_template_path(template, distro) - # don't fail if cloud-init doesn't have a template for - # a distro,client pair - if not os.path.exists(source_fn): - continue - # Create a copy in our tmp_dir - shutil.copy(source_fn, template_fn) - cc_ntp.write_ntp_config_template(distro, servers=servers, - pools=pools, path=confpath, - template_fn=template_fn) - content = util.load_file(confpath) - if client in ['ntp', 'chrony']: - content_lines = content.splitlines() - expected_servers = self._get_expected_servers(servers, - distro, - client) - print('distro=%s client=%s' % (distro, client)) - for sline in expected_servers: - self.assertIn(sline, content_lines, - ('failed to render {0} conf' - ' for distro:{1}'.format(client, - distro))) - expected_pools = self._get_expected_pools(pools, distro, - client) - if expected_pools != []: - for pline in expected_pools: - self.assertIn(pline, content_lines, - ('failed to render {0} conf' - ' for distro:{1}'.format(client, - distro))) - elif client == 'systemd-timesyncd': - expected_servers = self._get_expected_servers(servers, - distro, - client) - expected_pools = self._get_expected_pools(pools, - distro, - client) - expected_content = ( - "# cloud-init generated file\n" + - "# See timesyncd.conf(5) for details.\n\n" + - "[Time]\nNTP=%s %s \n" % (expected_servers, - expected_pools)) - self.assertEqual(expected_content, content) - - def test_no_ntpcfg_does_nothing(self): - """When no ntp section is defined handler logs a warning and noops.""" - cc_ntp.handle('cc_ntp', {}, None, None, []) - self.assertEqual( - 'DEBUG: Skipping module named cc_ntp, ' - 'not present or disabled by cfg\n', - self.logs.getvalue()) - - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_schema_validation_allows_empty_ntp_config(self, - m_select): - """Ntp schema validation allows for an empty ntp: configuration.""" - valid_empty_configs = [{'ntp': {}}, {'ntp': None}] - for valid_empty_config in valid_empty_configs: - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] - m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, []) - if distro == 'alpine': - # _mock_ntp_client_config call above did not specify a - # client value and so it defaults to "ntp" which on - # Alpine Linux only supports servers and not pools. 
- - servers = cc_ntp.generate_server_names(mycloud.distro.name) - self.assertEqual( - "servers {0}\npools []\n".format(servers), - util.load_file(confpath)) - else: - pools = cc_ntp.generate_server_names(mycloud.distro.name) - self.assertEqual( - "servers []\npools {0}\n".format(pools), - util.load_file(confpath)) - self.assertNotIn('Invalid config:', self.logs.getvalue()) - - @skipUnlessJsonSchema() - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_schema_validation_warns_non_string_item_type(self, - m_sel): - """Ntp schema validation warns of non-strings in pools or servers. - - Schema validation is not strict, so ntp config is still be rendered. - """ - invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}} - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] - m_sel.return_value = ntpconfig - cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) - self.assertIn( - "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" - "ntp.servers.1: None is not of type 'string'", - self.logs.getvalue()) - self.assertEqual("servers ['valid', None]\npools [123]\n", - util.load_file(confpath)) - - @skipUnlessJsonSchema() - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_schema_validation_warns_of_non_array_type(self, - m_select): - """Ntp schema validation warns of non-array pools or servers types. - - Schema validation is not strict, so ntp config is still be rendered. - """ - invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}} - - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] - m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) - self.assertIn( - "Invalid config:\nntp.pools: 123 is not of type 'array'\n" - "ntp.servers: 'non-array' is not of type 'array'", - self.logs.getvalue()) - self.assertEqual("servers non-array\npools 123\n", - util.load_file(confpath)) - - @skipUnlessJsonSchema() - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_schema_validation_warns_invalid_key_present(self, - m_select): - """Ntp schema validation warns of invalid keys present in ntp config. - - Schema validation is not strict, so ntp config is still be rendered. - """ - invalid_config = { - 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}} - for distro in cc_ntp.distros: - if distro != 'alpine': - mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] - m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) - self.assertIn( - "Invalid config:\nntp: Additional properties are not " - "allowed ('invalidkey' was unexpected)", - self.logs.getvalue()) - self.assertEqual( - "servers []\npools ['0.mycompany.pool.ntp.org']\n", - util.load_file(confpath)) - - @skipUnlessJsonSchema() - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select): - """Ntp schema validation warns of duplicates in servers or pools. - - Schema validation is not strict, so ntp config is still be rendered. 
- """ - invalid_config = { - 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'], - 'servers': ['10.0.0.1', '10.0.0.1']}} - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] - m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) - self.assertIn( - "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']" - " has non-unique elements\nntp.servers: " - "['10.0.0.1', '10.0.0.1'] has non-unique elements", - self.logs.getvalue()) - self.assertEqual( - "servers ['10.0.0.1', '10.0.0.1']\n" - "pools ['0.mypool.org', '0.mypool.org']\n", - util.load_file(confpath)) - - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_timesyncd(self, m_select): - """Test ntp handler configures timesyncd""" - servers = ['192.168.2.1', '192.168.2.2'] - pools = ['0.mypool.org'] - cfg = {'ntp': {'servers': servers, 'pools': pools}} - client = 'systemd-timesyncd' - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(distro=distro, - client=client) - confpath = ntpconfig['confpath'] - m_select.return_value = ntpconfig - cc_ntp.handle('cc_ntp', cfg, mycloud, None, []) - self.assertEqual( - "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", - util.load_file(confpath)) - - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_handler_enabled_false(self, m_select): - """Test ntp handler does not run if enabled: false """ - cfg = {'ntp': {'enabled': False}} - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - self.assertEqual(0, m_select.call_count) - - @mock.patch("cloudinit.distros.subp") - @mock.patch("cloudinit.config.cc_ntp.subp") - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - @mock.patch("cloudinit.distros.Distro.uses_systemd") - def test_ntp_the_whole_package(self, m_sysd, m_select, m_subp, m_dsubp): - """Test enabled config renders template, and restarts service """ - cfg = {'ntp': {'enabled': True}} - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(distro=distro) - confpath = ntpconfig['confpath'] - service_name = ntpconfig['service_name'] - m_select.return_value = ntpconfig - - hosts = cc_ntp.generate_server_names(mycloud.distro.name) - uses_systemd = True - expected_service_call = ['systemctl', 'reload-or-restart', - service_name] - expected_content = "servers []\npools {0}\n".format(hosts) - - if distro == 'alpine': - uses_systemd = False - expected_service_call = ['rc-service', service_name, 'restart'] - # _mock_ntp_client_config call above did not specify a client - # value and so it defaults to "ntp" which on Alpine Linux only - # supports servers and not pools. 
- expected_content = "servers {0}\npools []\n".format(hosts) - - m_sysd.return_value = uses_systemd - with mock.patch('cloudinit.config.cc_ntp.util') as m_util: - # allow use of util.mergemanydict - m_util.mergemanydict.side_effect = util.mergemanydict - # default client is present - m_subp.which.return_value = True - # use the config 'enabled' value - m_util.is_false.return_value = util.is_false( - cfg['ntp']['enabled']) - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - m_dsubp.subp.assert_called_with( - expected_service_call, capture=True) - - self.assertEqual(expected_content, util.load_file(confpath)) - - @mock.patch('cloudinit.util.system_info') - def test_opensuse_picks_chrony(self, m_sysinfo): - """Test opensuse picks chrony or ntp on certain distro versions""" - # < 15.0 => ntp - m_sysinfo.return_value = { - 'dist': ('openSUSE', '13.2', 'Harlequin') - } - mycloud = self._get_cloud('opensuse') - expected_client = mycloud.distro.preferred_ntp_clients[0] - self.assertEqual('ntp', expected_client) - - # >= 15.0 and not openSUSE => chrony - m_sysinfo.return_value = { - 'dist': ('SLES', '15.0', 'SUSE Linux Enterprise Server 15') - } - mycloud = self._get_cloud('sles') - expected_client = mycloud.distro.preferred_ntp_clients[0] - self.assertEqual('chrony', expected_client) - - # >= 15.0 and openSUSE and ver != 42 => chrony - m_sysinfo.return_value = { - 'dist': ('openSUSE Tumbleweed', '20180326', 'timbleweed') - } - mycloud = self._get_cloud('opensuse') - expected_client = mycloud.distro.preferred_ntp_clients[0] - self.assertEqual('chrony', expected_client) - - @mock.patch('cloudinit.util.system_info') - def test_ubuntu_xenial_picks_ntp(self, m_sysinfo): - """Test Ubuntu picks ntp on xenial release""" - - m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')} - mycloud = self._get_cloud('ubuntu') - expected_client = mycloud.distro.preferred_ntp_clients[0] - self.assertEqual('ntp', expected_client) - - @mock.patch('cloudinit.config.cc_ntp.subp.which') - def test_snappy_system_picks_timesyncd(self, m_which): - """Test snappy systems prefer installed clients""" - - # we are on ubuntu-core here - self.m_snappy.return_value = True - - # ubuntu core systems will have timesyncd installed - m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd', - None, None, None]) - distro = 'ubuntu' - mycloud = self._get_cloud(distro) - distro_configs = cc_ntp.distro_ntp_client_configs(distro) - expected_client = 'systemd-timesyncd' - expected_cfg = distro_configs[expected_client] - expected_calls = [] - # we only get to timesyncd - for client in mycloud.distro.preferred_ntp_clients[0:2]: - cfg = distro_configs[client] - expected_calls.append(mock.call(cfg['check_exe'])) - result = cc_ntp.select_ntp_client(None, mycloud.distro) - m_which.assert_has_calls(expected_calls) - self.assertEqual(sorted(expected_cfg), sorted(cfg)) - self.assertEqual(sorted(expected_cfg), sorted(result)) - - @mock.patch('cloudinit.config.cc_ntp.subp.which') - def test_ntp_distro_searches_all_preferred_clients(self, m_which): - """Test select_ntp_client search all distro perferred clients """ - # nothing is installed - m_which.return_value = None - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - distro_configs = cc_ntp.distro_ntp_client_configs(distro) - expected_client = mycloud.distro.preferred_ntp_clients[0] - expected_cfg = distro_configs[expected_client] - expected_calls = [] - for client in mycloud.distro.preferred_ntp_clients: - cfg = distro_configs[client] - 
expected_calls.append(mock.call(cfg['check_exe'])) - cc_ntp.select_ntp_client({}, mycloud.distro) - m_which.assert_has_calls(expected_calls) - self.assertEqual(sorted(expected_cfg), sorted(cfg)) - - @mock.patch('cloudinit.config.cc_ntp.subp.which') - def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which): - """Test user_cfg.ntp_client='auto' defaults to distro search""" - # nothing is installed - m_which.return_value = None - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - distro_configs = cc_ntp.distro_ntp_client_configs(distro) - expected_client = mycloud.distro.preferred_ntp_clients[0] - expected_cfg = distro_configs[expected_client] - expected_calls = [] - for client in mycloud.distro.preferred_ntp_clients: - cfg = distro_configs[client] - expected_calls.append(mock.call(cfg['check_exe'])) - cc_ntp.select_ntp_client('auto', mycloud.distro) - m_which.assert_has_calls(expected_calls) - self.assertEqual(sorted(expected_cfg), sorted(cfg)) - - @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template') - @mock.patch('cloudinit.cloud.Cloud.get_template_filename') - @mock.patch('cloudinit.config.cc_ntp.subp.which') - def test_ntp_custom_client_overrides_installed_clients(self, m_which, - m_tmpfn, m_write): - """Test user client is installed despite other clients present """ - client = 'ntpdate' - cfg = {'ntp': {'ntp_client': client}} - for distro in cc_ntp.distros: - # client is not installed - m_which.side_effect = iter([None]) - mycloud = self._get_cloud(distro) - with mock.patch.object(mycloud.distro, - 'install_packages') as m_install: - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - m_install.assert_called_with([client]) - m_which.assert_called_with(client) - - @mock.patch('cloudinit.config.cc_ntp.subp.which') - def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which): - """Test distro system_config overrides builtin preferred ntp clients""" - system_client = 'chrony' - sys_cfg = {'ntp_client': system_client} - # no clients installed - m_which.return_value = None - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro, sys_cfg=sys_cfg) - distro_configs = cc_ntp.distro_ntp_client_configs(distro) - expected_cfg = distro_configs[system_client] - result = cc_ntp.select_ntp_client(None, mycloud.distro) - self.assertEqual(sorted(expected_cfg), sorted(result)) - m_which.assert_has_calls([]) - - @mock.patch('cloudinit.config.cc_ntp.subp.which') - def test_ntp_user_config_overrides_system_cfg(self, m_which): - """Test user-data overrides system_config ntp_client""" - system_client = 'chrony' - sys_cfg = {'ntp_client': system_client} - user_client = 'systemd-timesyncd' - # no clients installed - m_which.return_value = None - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro, sys_cfg=sys_cfg) - distro_configs = cc_ntp.distro_ntp_client_configs(distro) - expected_cfg = distro_configs[user_client] - result = cc_ntp.select_ntp_client(user_client, mycloud.distro) - self.assertEqual(sorted(expected_cfg), sorted(result)) - m_which.assert_has_calls([]) - - @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') - def test_ntp_user_provided_config_with_template(self, m_install): - custom = r'\n#MyCustomTemplate' - user_template = NTP_TEMPLATE + custom - confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf') - cfg = { - 'ntp': { - 'pools': ['mypool.org'], - 'ntp_client': 'myntpd', - 'config': { - 'check_exe': 'myntpd', - 'confpath': confpath, - 'packages': ['myntp'], - 'service_name': 'myntp', - 
'template': user_template, - } - } - } - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' - with mock.patch(mock_path, self.new_root): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - self.assertEqual( - "servers []\npools ['mypool.org']\n%s" % custom, - util.load_file(confpath)) - - @mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation') - @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') - @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') - def test_ntp_user_provided_config_template_only(self, m_select, m_install, - m_schema): - """Test custom template for default client""" - custom = r'\n#MyCustomTemplate' - user_template = NTP_TEMPLATE + custom - client = 'chrony' - cfg = { - 'pools': ['mypool.org'], - 'ntp_client': client, - 'config': { - 'template': user_template, - } - } - expected_merged_cfg = { - 'check_exe': 'chronyd', - 'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root), - 'template_name': 'client.conf', 'template': user_template, - 'service_name': 'chrony', 'packages': ['chrony']} - for distro in cc_ntp.distros: - mycloud = self._get_cloud(distro) - ntpconfig = self._mock_ntp_client_config(client=client, - distro=distro) - confpath = ntpconfig['confpath'] - m_select.return_value = ntpconfig - mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' - with mock.patch(mock_path, self.new_root): - cc_ntp.handle('notimportant', - {'ntp': cfg}, mycloud, None, None) - self.assertEqual( - "servers []\npools ['mypool.org']\n%s" % custom, - util.load_file(confpath)) - m_schema.assert_called_with(expected_merged_cfg) - - -class TestSupplementalSchemaValidation(CiTestCase): - - def test_error_on_missing_keys(self): - """ValueError raised reporting any missing required ntp:config keys""" - cfg = {} - match = (r'Invalid ntp configuration:\\nMissing required ntp:config' - ' keys: check_exe, confpath, packages, service_name') - with self.assertRaisesRegex(ValueError, match): - cc_ntp.supplemental_schema_validation(cfg) - - def test_error_requiring_either_template_or_template_name(self): - """ValueError raised if both template not template_name are None.""" - cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '', - 'template': None, 'template_name': None, 'packages': []} - match = (r'Invalid ntp configuration:\\nEither ntp:config:template' - ' or ntp:config:template_name values are required') - with self.assertRaisesRegex(ValueError, match): - cc_ntp.supplemental_schema_validation(cfg) - - def test_error_on_non_list_values(self): - """ValueError raised when packages is not of type list.""" - cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '', - 'template': 'asdf', 'template_name': None, 'packages': 'NOPE'} - match = (r'Invalid ntp configuration:\\nExpected a list of required' - ' package names for ntp:config:packages. Found \\(NOPE\\)') - with self.assertRaisesRegex(ValueError, match): - cc_ntp.supplemental_schema_validation(cfg) - - def test_error_on_non_string_values(self): - """ValueError raised for any values expected as string type.""" - cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3, - 'template': 4, 'template_name': 5, 'packages': []} - errors = [ - 'Expected a config file path ntp:config:confpath. Found (1)', - 'Expected a string type for ntp:config:check_exe. Found (2)', - 'Expected a string type for ntp:config:service_name. Found (3)', - 'Expected a string type for ntp:config:template. 
Found (4)', - 'Expected a string type for ntp:config:template_name. Found (5)'] - with self.assertRaises(ValueError) as context_mgr: - cc_ntp.supplemental_schema_validation(cfg) - error_msg = str(context_mgr.exception) - for error in errors: - self.assertIn(error, error_msg) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py deleted file mode 100644 index 4ac49424..00000000 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ /dev/null @@ -1,159 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import sys - -from cloudinit.config import cc_power_state_change as psc - -from cloudinit import distros -from cloudinit import helpers - -from cloudinit.tests import helpers as t_help -from cloudinit.tests.helpers import mock - - -class TestLoadPowerState(t_help.TestCase): - def setUp(self): - super(TestLoadPowerState, self).setUp() - cls = distros.fetch('ubuntu') - paths = helpers.Paths({}) - self.dist = cls('ubuntu', {}, paths) - - def test_no_config(self): - # completely empty config should mean do nothing - (cmd, _timeout, _condition) = psc.load_power_state({}, self.dist) - self.assertIsNone(cmd) - - def test_irrelevant_config(self): - # no power_state field in config should return None for cmd - (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'}, - self.dist) - self.assertIsNone(cmd) - - def test_invalid_mode(self): - - cfg = {'power_state': {'mode': 'gibberish'}} - self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) - - cfg = {'power_state': {'mode': ''}} - self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) - - def test_empty_mode(self): - cfg = {'power_state': {'message': 'goodbye'}} - self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) - - def test_valid_modes(self): - cfg = {'power_state': {}} - for mode in ('halt', 'poweroff', 'reboot'): - cfg['power_state']['mode'] = mode - check_lps_ret(psc.load_power_state(cfg, self.dist), mode=mode) - - def test_invalid_delay(self): - cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}} - self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) - - def test_valid_delay(self): - cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}} - for delay in ("now", "+1", "+30"): - cfg['power_state']['delay'] = delay - check_lps_ret(psc.load_power_state(cfg, self.dist)) - - def test_message_present(self): - cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}} - ret = psc.load_power_state(cfg, self.dist) - check_lps_ret(psc.load_power_state(cfg, self.dist)) - self.assertIn(cfg['power_state']['message'], ret[0]) - - def test_no_message(self): - # if message is not present, then no argument should be passed for it - cfg = {'power_state': {'mode': 'poweroff'}} - (cmd, _timeout, _condition) = psc.load_power_state(cfg, self.dist) - self.assertNotIn("", cmd) - check_lps_ret(psc.load_power_state(cfg, self.dist)) - self.assertTrue(len(cmd) == 3) - - def test_condition_null_raises(self): - cfg = {'power_state': {'mode': 'poweroff', 'condition': None}} - self.assertRaises(TypeError, psc.load_power_state, cfg, self.dist) - - def test_condition_default_is_true(self): - cfg = {'power_state': {'mode': 'poweroff'}} - _cmd, _timeout, cond = psc.load_power_state(cfg, self.dist) - self.assertEqual(cond, True) - - def test_freebsd_poweroff_uses_lowercase_p(self): - cls = distros.fetch('freebsd') - paths = helpers.Paths({}) - freebsd = 
cls('freebsd', {}, paths) - cfg = {'power_state': {'mode': 'poweroff'}} - ret = psc.load_power_state(cfg, freebsd) - self.assertIn('-p', ret[0]) - - def test_alpine_delay(self): - # alpine takes delay in seconds. - cls = distros.fetch('alpine') - paths = helpers.Paths({}) - alpine = cls('alpine', {}, paths) - cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}} - for delay, value in (('now', 0), ("+1", 60), ("+30", 1800)): - cfg['power_state']['delay'] = delay - ret = psc.load_power_state(cfg, alpine) - self.assertEqual('-d', ret[0][1]) - self.assertEqual(str(value), ret[0][2]) - - -class TestCheckCondition(t_help.TestCase): - def cmd_with_exit(self, rc): - return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc]) - - def test_true_is_true(self): - self.assertEqual(psc.check_condition(True), True) - - def test_false_is_false(self): - self.assertEqual(psc.check_condition(False), False) - - def test_cmd_exit_zero_true(self): - self.assertEqual(psc.check_condition(self.cmd_with_exit(0)), True) - - def test_cmd_exit_one_false(self): - self.assertEqual(psc.check_condition(self.cmd_with_exit(1)), False) - - def test_cmd_exit_nonzero_warns(self): - mocklog = mock.Mock() - self.assertEqual( - psc.check_condition(self.cmd_with_exit(2), mocklog), False) - self.assertEqual(mocklog.warning.call_count, 1) - - -def check_lps_ret(psc_return, mode=None): - if len(psc_return) != 3: - raise TypeError("length returned = %d" % len(psc_return)) - - errs = [] - cmd = psc_return[0] - timeout = psc_return[1] - condition = psc_return[2] - - if 'shutdown' not in psc_return[0][0]: - errs.append("string 'shutdown' not in cmd") - - if condition is None: - errs.append("condition was not returned") - - if mode is not None: - opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode] - if opt not in psc_return[0]: - errs.append("opt '%s' not in cmd: %s" % (opt, cmd)) - - if len(cmd) != 3 and len(cmd) != 4: - errs.append("Invalid command length: %s" % len(cmd)) - - try: - float(timeout) - except Exception: - errs.append("timeout failed convert to float") - - if len(errs): - lines = ["Errors in result: %s" % str(psc_return)] + errs - raise Exception('\n'.join(lines)) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_puppet.py b/tests/unittests/test_handler/test_handler_puppet.py deleted file mode 100644 index 8d99f535..00000000 --- a/tests/unittests/test_handler/test_handler_puppet.py +++ /dev/null @@ -1,380 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
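
check_lps_ret() above is the contract of psc.load_power_state in miniature: a 3-tuple of (shutdown argv, float-able timeout, condition), with the mode mapped to a shutdown flag. A hand-built value that would satisfy those checks (the argv itself is illustrative, not lifted from cloud-init):

    # Shaped like load_power_state's return for
    # {'power_state': {'mode': 'poweroff', 'delay': 'now'}}.
    psc_return = (['/sbin/shutdown', '-P', 'now'], 30.0, True)

    cmd, timeout, condition = psc_return
    assert 'shutdown' in cmd[0]      # command must be a shutdown invocation
    assert '-P' in cmd               # poweroff maps to the -P flag
    float(timeout)                   # timeout must convert to float
    assert condition is not None     # a condition is always returned
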
-import logging -import textwrap - -from cloudinit.config import cc_puppet -from cloudinit import util -from cloudinit.tests.helpers import CiTestCase, HttprettyTestCase, mock - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -@mock.patch('cloudinit.config.cc_puppet.subp.subp') -@mock.patch('cloudinit.config.cc_puppet.os') -class TestAutostartPuppet(CiTestCase): - - def test_wb_autostart_puppet_updates_puppet_default(self, m_os, m_subp): - """Update /etc/default/puppet to autostart if it exists.""" - - def _fake_exists(path): - return path == '/etc/default/puppet' - - m_os.path.exists.side_effect = _fake_exists - cc_puppet._autostart_puppet(LOG) - self.assertEqual( - [mock.call(['sed', '-i', '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False)], - m_subp.call_args_list) - - def test_wb_autostart_pupppet_enables_puppet_systemctl(self, m_os, m_subp): - """If systemctl is present, enable puppet via systemctl.""" - - def _fake_exists(path): - return path == '/bin/systemctl' - - m_os.path.exists.side_effect = _fake_exists - cc_puppet._autostart_puppet(LOG) - expected_calls = [mock.call( - ['/bin/systemctl', 'enable', 'puppet.service'], capture=False)] - self.assertEqual(expected_calls, m_subp.call_args_list) - - def test_wb_autostart_pupppet_enables_puppet_chkconfig(self, m_os, m_subp): - """If chkconfig is present, enable puppet via checkcfg.""" - - def _fake_exists(path): - return path == '/sbin/chkconfig' - - m_os.path.exists.side_effect = _fake_exists - cc_puppet._autostart_puppet(LOG) - expected_calls = [mock.call( - ['/sbin/chkconfig', 'puppet', 'on'], capture=False)] - self.assertEqual(expected_calls, m_subp.call_args_list) - - -@mock.patch('cloudinit.config.cc_puppet._autostart_puppet') -class TestPuppetHandle(CiTestCase): - - with_logs = True - - def setUp(self): - super(TestPuppetHandle, self).setUp() - self.new_root = self.tmp_dir() - self.conf = self.tmp_path('puppet.conf') - self.csr_attributes_path = self.tmp_path( - 'csr_attributes.yaml') - self.cloud = get_cloud() - - def test_skips_missing_puppet_key_in_cloudconfig(self, m_auto): - """Cloud-config containing no 'puppet' key is skipped.""" - - cfg = {} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertIn( - "no 'puppet' configuration found", self.logs.getvalue()) - self.assertEqual(0, m_auto.call_count) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_starts_puppet_service(self, m_subp, m_auto): - """Cloud-config 'puppet' configuration starts puppet.""" - - cfg = {'puppet': {'install': False}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(1, m_auto.call_count) - self.assertIn( - [mock.call(['service', 'puppet', 'start'], capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_empty_puppet_config_installs_puppet(self, m_subp, m_auto): - """Cloud-config empty 'puppet' configuration installs latest puppet.""" - - self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual( - [mock.call(('puppet', None))], - self.cloud.distro.install_packages.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_on_true(self, m_subp, _): - """Cloud-config with 'puppet' key installs when 'install' is True.""" - - self.cloud.distro = 
mock.MagicMock() - cfg = {'puppet': {'install': True}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual( - [mock.call(('puppet', None))], - self.cloud.distro.install_packages.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_aio(self, m_subp, m_aio, _): - """Cloud-config with 'puppet' key installs - when 'install_type' is 'aio'.""" - - self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'install': True, 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - m_aio.assert_called_with( - cc_puppet.AIO_INSTALL_URL, - None, None, True) - - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_aio_with_version(self, - m_subp, m_aio, _): - """Cloud-config with 'puppet' key installs - when 'install_type' is 'aio' and 'version' is specified.""" - - self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'install': True, - 'version': '6.24.0', 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - m_aio.assert_called_with( - cc_puppet.AIO_INSTALL_URL, - '6.24.0', None, True) - - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_aio_with_collection(self, - m_subp, - m_aio, _): - """Cloud-config with 'puppet' key installs - when 'install_type' is 'aio' and 'collection' is specified.""" - - self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'install': True, - 'collection': 'puppet6', 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - m_aio.assert_called_with( - cc_puppet.AIO_INSTALL_URL, - None, 'puppet6', True) - - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_aio_with_custom_url(self, - m_subp, - m_aio, _): - """Cloud-config with 'puppet' key installs - when 'install_type' is 'aio' and 'aio_install_url' is specified.""" - - self.cloud.distro = mock.MagicMock() - cfg = {'puppet': - {'install': True, - 'aio_install_url': 'http://test.url/path/to/script.sh', - 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - m_aio.assert_called_with( - 'http://test.url/path/to/script.sh', None, None, True) - - @mock.patch('cloudinit.config.cc_puppet.install_puppet_aio', autospec=True) - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_aio_without_cleanup(self, - m_subp, - m_aio, _): - """Cloud-config with 'puppet' key installs - when 'install_type' is 'aio' and no cleanup.""" - - self.cloud.distro = mock.MagicMock() - cfg = {'puppet': {'install': True, - 'cleanup': False, 'install_type': 'aio'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - m_aio.assert_called_with( - cc_puppet.AIO_INSTALL_URL, - None, None, False) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_installs_puppet_version(self, m_subp, _): - """Cloud-config 'puppet' configuration can specify a version.""" - - self.cloud.distro = mock.MagicMock() 
- cfg = {'puppet': {'version': '3.8'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual( - [mock.call(('puppet', '3.8'))], - self.cloud.distro.install_packages.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.get_config_value') - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_config_updates_puppet_conf(self, - m_subp, m_default, m_auto): - """When 'conf' is provided update values in PUPPET_CONF_PATH.""" - - def _fake_get_config_value(puppet_bin, setting): - return self.conf - - m_default.side_effect = _fake_get_config_value - - cfg = { - 'puppet': { - 'conf': {'agent': {'server': 'puppetserver.example.org'}}}} - util.write_file( - self.conf, '[agent]\nserver = origpuppet\nother = 3') - self.cloud.distro = mock.MagicMock() - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - content = util.load_file(self.conf) - expected = '[agent]\nserver = puppetserver.example.org\nother = 3\n\n' - self.assertEqual(expected, content) - - @mock.patch('cloudinit.config.cc_puppet.get_config_value') - @mock.patch('cloudinit.config.cc_puppet.subp.subp') - def test_puppet_writes_csr_attributes_file(self, - m_subp, m_default, m_auto): - """When csr_attributes is provided - creates file in PUPPET_CSR_ATTRIBUTES_PATH.""" - - def _fake_get_config_value(puppet_bin, setting): - return self.csr_attributes_path - - m_default.side_effect = _fake_get_config_value - - self.cloud.distro = mock.MagicMock() - cfg = { - 'puppet': { - 'csr_attributes': { - 'custom_attributes': { - '1.2.840.113549.1.9.7': - '342thbjkt82094y0uthhor289jnqthpc2290' - }, - 'extension_requests': { - 'pp_uuid': 'ED803750-E3C7-44F5-BB08-41A04433FE2E', - 'pp_image_name': 'my_ami_image', - 'pp_preshared_key': - '342thbjkt82094y0uthhor289jnqthpc2290' - } - } - } - } - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - content = util.load_file(self.csr_attributes_path) - expected = textwrap.dedent("""\ - custom_attributes: - 1.2.840.113549.1.9.7: 342thbjkt82094y0uthhor289jnqthpc2290 - extension_requests: - pp_image_name: my_ami_image - pp_preshared_key: 342thbjkt82094y0uthhor289jnqthpc2290 - pp_uuid: ED803750-E3C7-44F5-BB08-41A04433FE2E - """) - self.assertEqual(expected, content) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_runs_puppet_if_requested(self, m_subp, m_auto): - """Run puppet with default args if 'exec' is set to True.""" - - cfg = {'puppet': {'exec': True}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(1, m_auto.call_count) - self.assertIn( - [mock.call(['puppet', 'agent', '--test'], capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_starts_puppetd(self, m_subp, m_auto): - """Start the puppet service by default (no 'start_service' key).""" - - cfg = {'puppet': {}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(1, m_auto.call_count) - self.assertIn( - [mock.call(['service', 'puppet', 'start'], capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_skips_puppetd(self, m_subp, m_auto): - """Do not start the puppet service if 'start_service' is False.""" - - cfg = {'puppet': {'start_service': False}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(0, m_auto.call_count) - self.assertNotIn( - [mock.call(['service', 'puppet',
'start'], capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_runs_puppet_with_args_list_if_requested(self, - m_subp, m_auto): - """Run puppet with 'exec_args' list if 'exec' is set to True.""" - - cfg = {'puppet': {'exec': True, 'exec_args': [ - '--onetime', '--detailed-exitcodes']}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(1, m_auto.call_count) - self.assertIn( - [mock.call( - ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], - capture=False)], - m_subp.call_args_list) - - @mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=("", "")) - def test_puppet_runs_puppet_with_args_string_if_requested(self, - m_subp, m_auto): - """Run puppet with 'exec_args' string if 'exec' is set to True.""" - - cfg = {'puppet': {'exec': True, - 'exec_args': '--onetime --detailed-exitcodes'}} - cc_puppet.handle('notimportant', cfg, self.cloud, LOG, None) - self.assertEqual(1, m_auto.call_count) - self.assertIn( - [mock.call( - ['puppet', 'agent', '--onetime', '--detailed-exitcodes'], - capture=False)], - m_subp.call_args_list) - - -URL_MOCK = mock.Mock() -URL_MOCK.contents = b'#!/bin/bash\necho "Hi Mom"' - - -@mock.patch('cloudinit.config.cc_puppet.subp.subp', return_value=(None, None)) -@mock.patch( - 'cloudinit.config.cc_puppet.url_helper.readurl', - return_value=URL_MOCK, autospec=True, -) -class TestInstallPuppetAio(HttprettyTestCase): - def test_install_with_default_arguments(self, m_readurl, m_subp): - """Install AIO with no arguments""" - cc_puppet.install_puppet_aio() - - self.assertEqual( - [mock.call([mock.ANY, '--cleanup'], capture=False)], - m_subp.call_args_list) - - def test_install_with_custom_url(self, m_readurl, m_subp): - """Install AIO from custom URL""" - cc_puppet.install_puppet_aio('http://custom.url/path/to/script.sh') - m_readurl.assert_called_with( - url='http://custom.url/path/to/script.sh', - retries=5) - - self.assertEqual( - [mock.call([mock.ANY, '--cleanup'], capture=False)], - m_subp.call_args_list) - - def test_install_with_version(self, m_readurl, m_subp): - """Install AIO with specific version""" - cc_puppet.install_puppet_aio(cc_puppet.AIO_INSTALL_URL, '7.6.0') - - self.assertEqual( - [mock.call([mock.ANY, '-v', '7.6.0', '--cleanup'], capture=False)], - m_subp.call_args_list) - - def test_install_with_collection(self, m_readurl, m_subp): - """Install AIO with specific collection""" - cc_puppet.install_puppet_aio( - cc_puppet.AIO_INSTALL_URL, None, 'puppet6-nightly') - - self.assertEqual( - [mock.call([mock.ANY, '-c', 'puppet6-nightly', '--cleanup'], - capture=False)], - m_subp.call_args_list) - - def test_install_with_no_cleanup(self, m_readurl, m_subp): - """Install AIO with no cleanup""" - cc_puppet.install_puppet_aio( - cc_puppet.AIO_INSTALL_URL, None, None, False) - - self.assertEqual( - [mock.call([mock.ANY], capture=False)], - m_subp.call_args_list) diff --git a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py b/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py deleted file mode 100644 index e13b7793..00000000 --- a/tests/unittests/test_handler/test_handler_refresh_rmc_and_interface.py +++ /dev/null @@ -1,109 +0,0 @@ -from cloudinit.config import cc_refresh_rmc_and_interface as ccrmci - -from cloudinit import util - -from cloudinit.tests import helpers as t_help -from cloudinit.tests.helpers import mock - -from textwrap import dedent -import logging - -LOG = 
logging.getLogger(__name__) -MPATH = "cloudinit.config.cc_refresh_rmc_and_interface" -NET_INFO = { - 'lo': {'ipv4': [{'ip': '127.0.0.1', - 'bcast': '', 'mask': '255.0.0.0', - 'scope': 'host'}], - 'ipv6': [{'ip': '::1/128', - 'scope6': 'host'}], 'hwaddr': '', - 'up': 'True'}, - 'env2': {'ipv4': [{'ip': '8.0.0.19', - 'bcast': '8.0.0.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8220/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:20', - 'up': 'True'}, - 'env3': {'ipv4': [{'ip': '90.0.0.14', - 'bcast': '90.0.0.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8221/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:21', - 'up': 'True'}, - 'env4': {'ipv4': [{'ip': '9.114.23.7', - 'bcast': '9.114.23.255', 'mask': '255.255.255.0', - 'scope': 'global'}], - 'ipv6': [{'ip': 'fe80::f896:c2ff:fe81:8222/64', - 'scope6': 'link'}], 'hwaddr': 'fa:96:c2:81:82:22', - 'up': 'True'}, - 'env5': {'ipv4': [], - 'ipv6': [{'ip': 'fe80::9c26:c3ff:fea4:62c8/64', - 'scope6': 'link'}], 'hwaddr': '42:20:86:df:fa:4c', - 'up': 'True'}} - - -class TestRsctNodeFile(t_help.CiTestCase): - def test_disable_ipv6_interface(self): - """test parsing of iface files.""" - fname = self.tmp_path("iface-eth5") - util.write_file(fname, dedent("""\ - BOOTPROTO=static - DEVICE=eth5 - HWADDR=42:20:86:df:fa:4c - IPV6INIT=yes - IPADDR6=fe80::9c26:c3ff:fea4:62c8/64 - IPV6ADDR=fe80::9c26:c3ff:fea4:62c8/64 - NM_CONTROLLED=yes - ONBOOT=yes - STARTMODE=auto - TYPE=Ethernet - USERCTL=no - """)) - - ccrmci.disable_ipv6(fname) - self.assertEqual(dedent("""\ - BOOTPROTO=static - DEVICE=eth5 - HWADDR=42:20:86:df:fa:4c - ONBOOT=yes - STARTMODE=auto - TYPE=Ethernet - USERCTL=no - NM_CONTROLLED=no - """), util.load_file(fname)) - - @mock.patch(MPATH + '.refresh_rmc') - @mock.patch(MPATH + '.restart_network_manager') - @mock.patch(MPATH + '.disable_ipv6') - @mock.patch(MPATH + '.refresh_ipv6') - @mock.patch(MPATH + '.netinfo.netdev_info') - @mock.patch(MPATH + '.subp.which') - def test_handle(self, m_which, - m_netdev_info, m_refresh_ipv6, m_disable_ipv6, - m_restart_nm, m_refresh_rmc): - """Basic test of handle.""" - m_netdev_info.return_value = NET_INFO - m_which.return_value = '/opt/rsct/bin/rmcctrl' - ccrmci.handle( - "refresh_rmc_and_interface", None, None, None, None) - self.assertEqual(1, m_netdev_info.call_count) - m_refresh_ipv6.assert_called_with('env5') - m_disable_ipv6.assert_called_with( - '/etc/sysconfig/network-scripts/ifcfg-env5') - self.assertEqual(1, m_restart_nm.call_count) - self.assertEqual(1, m_refresh_rmc.call_count) - - @mock.patch(MPATH + '.netinfo.netdev_info') - def test_find_ipv6(self, m_netdev_info): - """find_ipv6_ifaces parses netdev_info returning those with ipv6""" - m_netdev_info.return_value = NET_INFO - found = ccrmci.find_ipv6_ifaces() - self.assertEqual(['env5'], found) - - @mock.patch(MPATH + '.subp.subp') - def test_refresh_ipv6(self, m_subp): - """refresh_ipv6 should ip down and up the interface.""" - iface = "myeth0" - ccrmci.refresh_ipv6(iface) - m_subp.assert_has_calls([ - mock.call(['ip', 'link', 'set', iface, 'down']), - mock.call(['ip', 'link', 'set', iface, 'up'])]) diff --git a/tests/unittests/test_handler/test_handler_resizefs.py b/tests/unittests/test_handler/test_handler_resizefs.py deleted file mode 100644 index 28d55072..00000000 --- a/tests/unittests/test_handler/test_handler_resizefs.py +++ /dev/null @@ -1,398 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
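The resizefs tests that follow pin the exact resize command per filesystem type. A minimal sketch of that mapping, with the command tuples copied from the assertions below and the dispatcher itself assumed:

    # Command shapes match the deleted _resize_* assertions below; the
    # lookup function is an illustrative stand-in, not cc_resizefs code.
    def resize_cmd(fs_type, mount_point, devpth):
        if fs_type == 'xfs':
            return ('xfs_growfs', mount_point)    # grows the mounted fs
        if fs_type.startswith('ext'):
            return ('resize2fs', devpth)          # operates on the device
        if fs_type == 'ufs':
            return ('growfs', '-y', mount_point)  # FreeBSD growfs
        raise ValueError('unhandled filesystem: %s' % fs_type)

    # zfs is special-cased upstream: the pool (first component of the
    # root dataset) is expanded onto its vdev, e.g.
    # ('zpool', 'online', '-e', 'zroot', '/dev/da0p3') per the tests below.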
- -from cloudinit.config.cc_resizefs import ( - can_skip_resize, handle, maybe_get_writable_device_path, _resize_btrfs, - _resize_zfs, _resize_xfs, _resize_ext, _resize_ufs) - -from collections import namedtuple -import logging - -from cloudinit.subp import ProcessExecutionError -from cloudinit.tests.helpers import ( - CiTestCase, mock, skipUnlessJsonSchema, util, wrap_and_call) - - -LOG = logging.getLogger(__name__) - - -class TestResizefs(CiTestCase): - with_logs = True - - def setUp(self): - super(TestResizefs, self).setUp() - self.name = "resizefs" - - @mock.patch('cloudinit.subp.subp') - def test_skip_ufs_resize(self, m_subp): - fs_type = "ufs" - resize_what = "/" - devpth = "/dev/da0p2" - err = ("growfs: requested size 2.0GB is not larger than the " - "current filesystem size 2.0GB\n") - exception = ProcessExecutionError(stderr=err, exit_code=1) - m_subp.side_effect = exception - res = can_skip_resize(fs_type, resize_what, devpth) - self.assertTrue(res) - - @mock.patch('cloudinit.subp.subp') - def test_cannot_skip_ufs_resize(self, m_subp): - fs_type = "ufs" - resize_what = "/" - devpth = "/dev/da0p2" - m_subp.return_value = ( - ("stdout: super-block backups (for fsck_ffs -b #) at:\n\n"), - ("growfs: no room to allocate last cylinder group; " - "leaving 364KB unused\n") - ) - res = can_skip_resize(fs_type, resize_what, devpth) - self.assertFalse(res) - - @mock.patch('cloudinit.subp.subp') - def test_cannot_skip_ufs_growfs_exception(self, m_subp): - fs_type = "ufs" - resize_what = "/" - devpth = "/dev/da0p2" - err = "growfs: /dev/da0p2 is not clean - run fsck.\n" - exception = ProcessExecutionError(stderr=err, exit_code=1) - m_subp.side_effect = exception - with self.assertRaises(ProcessExecutionError): - can_skip_resize(fs_type, resize_what, devpth) - - def test_can_skip_resize_ext(self): - self.assertFalse(can_skip_resize('ext', '/', '/dev/sda1')) - - def test_handle_noops_on_disabled(self): - """The handle function logs when the configuration disables resize.""" - cfg = {'resize_rootfs': False} - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - self.assertIn( - 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n', - self.logs.getvalue()) - - @skipUnlessJsonSchema() - def test_handle_schema_validation_logs_invalid_resize_rootfs_value(self): - """The handle reports json schema violations as a warning. - - Invalid values for resize_rootfs result in disabling the module. 
- """ - cfg = {'resize_rootfs': 'junk'} - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - logs = self.logs.getvalue() - self.assertIn( - "WARNING: Invalid config:\nresize_rootfs: 'junk' is not one of" - " [True, False, 'noblock']", - logs) - self.assertIn( - 'DEBUG: Skipping module named cc_resizefs, resizing disabled\n', - logs) - - @mock.patch('cloudinit.config.cc_resizefs.util.get_mount_info') - def test_handle_warns_on_unknown_mount_info(self, m_get_mount_info): - """handle warns when get_mount_info sees unknown filesystem for /.""" - m_get_mount_info.return_value = None - cfg = {'resize_rootfs': True} - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - logs = self.logs.getvalue() - self.assertNotIn("WARNING: Invalid config:\nresize_rootfs:", logs) - self.assertIn( - 'WARNING: Could not determine filesystem type of /\n', - logs) - self.assertEqual( - [mock.call('/', LOG)], - m_get_mount_info.call_args_list) - - def test_handle_warns_on_undiscoverable_root_path_in_commandline(self): - """handle noops when the root path is not found on the commandline.""" - cfg = {'resize_rootfs': True} - exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists' - - def fake_mount_info(path, log): - self.assertEqual('/', path) - self.assertEqual(LOG, log) - return ('/dev/root', 'ext4', '/') - - with mock.patch(exists_mock_path) as m_exists: - m_exists.return_value = False - wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}, - 'get_mount_info': {'side_effect': fake_mount_info}, - 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}}, - handle, 'cc_resizefs', cfg, _cloud=None, log=LOG, - args=[]) - logs = self.logs.getvalue() - self.assertIn("WARNING: Unable to find device '/dev/root'", logs) - - def test_resize_zfs_cmd_return(self): - zpool = 'zroot' - devpth = 'gpt/system' - self.assertEqual(('zpool', 'online', '-e', zpool, devpth), - _resize_zfs(zpool, devpth)) - - def test_resize_xfs_cmd_return(self): - mount_point = '/mnt/test' - devpth = '/dev/sda1' - self.assertEqual(('xfs_growfs', mount_point), - _resize_xfs(mount_point, devpth)) - - def test_resize_ext_cmd_return(self): - mount_point = '/' - devpth = '/dev/sdb1' - self.assertEqual(('resize2fs', devpth), - _resize_ext(mount_point, devpth)) - - def test_resize_ufs_cmd_return(self): - mount_point = '/' - devpth = '/dev/sda2' - self.assertEqual(('growfs', '-y', mount_point), - _resize_ufs(mount_point, devpth)) - - @mock.patch('cloudinit.util.is_container', return_value=False) - @mock.patch('cloudinit.util.parse_mount') - @mock.patch('cloudinit.util.get_device_info_from_zpool') - @mock.patch('cloudinit.util.get_mount_info') - def test_handle_zfs_root(self, mount_info, zpool_info, parse_mount, - is_container): - devpth = 'vmzroot/ROOT/freebsd' - disk = 'gpt/system' - fs_type = 'zfs' - mount_point = '/' - - mount_info.return_value = (devpth, fs_type, mount_point) - zpool_info.return_value = disk - parse_mount.return_value = (devpth, fs_type, mount_point) - - cfg = {'resize_rootfs': True} - - with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize: - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - ret = dresize.call_args[0][0] - - self.assertEqual(('zpool', 'online', '-e', 'vmzroot', disk), ret) - - @mock.patch('cloudinit.util.is_container', return_value=False) - @mock.patch('cloudinit.util.get_mount_info') - @mock.patch('cloudinit.util.get_device_info_from_zpool') - @mock.patch('cloudinit.util.parse_mount') - def test_handle_modern_zfsroot(self, 
mount_info, zpool_info, parse_mount, - is_container): - devpth = 'zroot/ROOT/default' - disk = 'da0p3' - fs_type = 'zfs' - mount_point = '/' - - mount_info.return_value = (devpth, fs_type, mount_point) - zpool_info.return_value = disk - parse_mount.return_value = (devpth, fs_type, mount_point) - - cfg = {'resize_rootfs': True} - - def fake_stat(devpath): - if devpath == disk: - raise OSError("not here") - FakeStat = namedtuple( - 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal stat - return FakeStat(25008, 0, 1) # fake char block device - - with mock.patch('cloudinit.config.cc_resizefs.do_resize') as dresize: - with mock.patch('cloudinit.config.cc_resizefs.os.stat') as m_stat: - m_stat.side_effect = fake_stat - handle('cc_resizefs', cfg, _cloud=None, log=LOG, args=[]) - - self.assertEqual(('zpool', 'online', '-e', 'zroot', '/dev/' + disk), - dresize.call_args[0][0]) - - -class TestRootDevFromCmdline(CiTestCase): - - def test_rootdev_from_cmdline_with_no_root(self): - """Return None from rootdev_from_cmdline when root is not present.""" - invalid_cases = [ - 'BOOT_IMAGE=/adsf asdfa werasef root adf', 'BOOT_IMAGE=/adsf', ''] - for case in invalid_cases: - self.assertIsNone(util.rootdev_from_cmdline(case)) - - def test_rootdev_from_cmdline_with_root_startswith_dev(self): - """Return the cmdline root when the path starts with /dev.""" - self.assertEqual( - '/dev/this', util.rootdev_from_cmdline('asdf root=/dev/this')) - - def test_rootdev_from_cmdline_with_root_without_dev_prefix(self): - """Add /dev prefix to cmdline root when the path lacks the prefix.""" - self.assertEqual( - '/dev/this', util.rootdev_from_cmdline('asdf root=this')) - - def test_rootdev_from_cmdline_with_root_with_label(self): - """When cmdline root contains a LABEL, our root is disk/by-label.""" - self.assertEqual( - '/dev/disk/by-label/unique', - util.rootdev_from_cmdline('asdf root=LABEL=unique')) - - def test_rootdev_from_cmdline_with_root_with_uuid(self): - """When cmdline root contains a UUID, our root is disk/by-uuid.""" - self.assertEqual( - '/dev/disk/by-uuid/adsfdsaf-adsf', - util.rootdev_from_cmdline('asdf root=UUID=adsfdsaf-adsf')) - - -class TestMaybeGetDevicePathAsWritableBlock(CiTestCase): - - with_logs = True - - def test_maybe_get_writable_device_path_none_on_overlayroot(self): - """When devpath is overlayroot (on MAAS), is_dev_writable is False.""" - info = 'does not matter' - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}}, - maybe_get_writable_device_path, 'overlayroot', info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "Not attempting to resize devpath 'overlayroot'", - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_warns_missing_cmdline_root(self): - """When root doesn't exist and isn't in the cmdline, log a warning.""" - info = 'does not matter' - - def fake_mount_info(path, log): - self.assertEqual('/', path) - self.assertEqual(LOG, log) - return ('/dev/root', 'ext4', '/') - - exists_mock_path = 'cloudinit.config.cc_resizefs.os.path.exists' - with mock.patch(exists_mock_path) as m_exists: - m_exists.return_value = False - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}, - 'get_mount_info': {'side_effect': fake_mount_info}, - 'get_cmdline': {'return_value': 'BOOT_IMAGE=/vmlinuz.efi'}}, - maybe_get_writable_device_path, '/dev/root', info, LOG) - self.assertIsNone(devpath) - logs = self.logs.getvalue() - self.assertIn("WARNING: Unable to find device
'/dev/root'", logs) - - def test_maybe_get_writable_device_path_does_not_exist(self): - """When devpath does not exist, a warning is logged.""" - info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none' - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}}, - maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "WARNING: Device '/dev/I/dont/exist' did not exist." - ' cannot resize: %s' % info, - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_does_not_exist_in_container(self): - """When devpath does not exist in a container, log a debug message.""" - info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none' - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': True}}, - maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "DEBUG: Device '/dev/I/dont/exist' did not exist in container." - ' cannot resize: %s' % info, - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_raises_oserror(self): - """When unexpected OSError is raises by os.stat it is reraised.""" - info = 'dev=/dev/I/dont/exist mnt_point=/ path=/dev/none' - with self.assertRaises(OSError) as context_manager: - wrap_and_call( - 'cloudinit.config.cc_resizefs', - {'util.is_container': {'return_value': True}, - 'os.stat': {'side_effect': OSError('Something unexpected')}}, - maybe_get_writable_device_path, '/dev/I/dont/exist', info, LOG) - self.assertEqual( - 'Something unexpected', str(context_manager.exception)) - - def test_maybe_get_writable_device_path_non_block(self): - """When device is not a block device, emit warning return False.""" - fake_devpath = self.tmp_path('dev/readwrite') - util.write_file(fake_devpath, '', mode=0o600) # read-write - info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) - - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': False}}, - maybe_get_writable_device_path, fake_devpath, info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "WARNING: device '{0}' not a block device. cannot resize".format( - fake_devpath), - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_non_block_on_container(self): - """When device is non-block device in container, emit debug log.""" - fake_devpath = self.tmp_path('dev/readwrite') - util.write_file(fake_devpath, '', mode=0o600) # read-write - info = 'dev=/dev/root mnt_point=/ path={0}'.format(fake_devpath) - - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs.util', - {'is_container': {'return_value': True}}, - maybe_get_writable_device_path, fake_devpath, info, LOG) - self.assertIsNone(devpath) - self.assertIn( - "DEBUG: device '{0}' not a block device in container." - ' cannot resize'.format(fake_devpath), - self.logs.getvalue()) - - def test_maybe_get_writable_device_path_returns_cmdline_root(self): - """When root device is UUID in kernel commandline, update devpath.""" - # XXX Long-term we want to use FilesystemMocking test to avoid - # touching os.stat. - FakeStat = namedtuple( - 'FakeStat', ['st_mode', 'st_size', 'st_mtime']) # minimal def. 
- info = 'dev=/dev/root mnt_point=/ path=/does/not/matter' - devpath = wrap_and_call( - 'cloudinit.config.cc_resizefs', - {'util.get_cmdline': {'return_value': 'asdf root=UUID=my-uuid'}, - 'util.is_container': False, - 'os.path.exists': False, # /dev/root doesn't exist - 'os.stat': { - 'return_value': FakeStat(25008, 0, 1)} # char block device - }, - maybe_get_writable_device_path, '/dev/root', info, LOG) - self.assertEqual('/dev/disk/by-uuid/my-uuid', devpath) - self.assertIn( - "DEBUG: Converted /dev/root to '/dev/disk/by-uuid/my-uuid'" - " per kernel cmdline", - self.logs.getvalue()) - - @mock.patch('cloudinit.util.mount_is_read_write') - @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir') - def test_resize_btrfs_mount_is_ro(self, m_is_dir, m_is_rw): - """Do not resize / directly if it is read-only. (LP: #1734787).""" - m_is_rw.return_value = False - m_is_dir.return_value = True - self.assertEqual( - ('btrfs', 'filesystem', 'resize', 'max', '//.snapshots'), - _resize_btrfs("/", "/dev/sda1")) - - @mock.patch('cloudinit.util.mount_is_read_write') - @mock.patch('cloudinit.config.cc_resizefs.os.path.isdir') - def test_resize_btrfs_mount_is_rw(self, m_is_dir, m_is_rw): - """Do not resize / directly if it is read-only. (LP: #1734787).""" - m_is_rw.return_value = True - m_is_dir.return_value = True - self.assertEqual( - ('btrfs', 'filesystem', 'resize', 'max', '/'), - _resize_btrfs("/", "/dev/sda1")) - - @mock.patch('cloudinit.util.is_container', return_value=True) - @mock.patch('cloudinit.util.is_FreeBSD') - def test_maybe_get_writable_device_path_zfs_freebsd(self, freebsd, - m_is_container): - freebsd.return_value = True - info = 'dev=gpt/system mnt_point=/ path=/' - devpth = maybe_get_writable_device_path('gpt/system', info, LOG) - self.assertEqual('gpt/system', devpth) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_resolv_conf.py b/tests/unittests/test_handler/test_handler_resolv_conf.py deleted file mode 100644 index 96139001..00000000 --- a/tests/unittests/test_handler/test_handler_resolv_conf.py +++ /dev/null @@ -1,105 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
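Before moving on: the TestRootDevFromCmdline cases above reduce to a small normalization of the kernel cmdline's root= token. A sketch consistent with those assertions (a stand-in for, not a copy of, util.rootdev_from_cmdline):

    def rootdev_from_cmdline(cmdline):
        # pick out 'root=<value>' from the kernel command line
        for tok in cmdline.split():
            if tok.startswith('root='):
                root = tok[len('root='):]
                break
        else:
            return None  # no root= present
        if root.startswith('/dev/'):
            return root
        if root.startswith('LABEL='):
            return '/dev/disk/by-label/' + root[len('LABEL='):]
        if root.startswith('UUID='):
            return '/dev/disk/by-uuid/' + root[len('UUID='):]
        return '/dev/' + root  # bare names get a /dev prefix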
- -from cloudinit.config import cc_resolv_conf - -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers -from cloudinit import util -from copy import deepcopy - -from cloudinit.tests import helpers as t_help - -import logging -import os -import shutil -import tempfile -from unittest import mock - -LOG = logging.getLogger(__name__) - - -class TestResolvConf(t_help.FilesystemMockingTestCase): - with_logs = True - cfg = {'manage_resolv_conf': True, 'resolv_conf': {}} - - def setUp(self): - super(TestResolvConf, self).setUp() - self.tmp = tempfile.mkdtemp() - util.ensure_dir(os.path.join(self.tmp, 'data')) - self.addCleanup(shutil.rmtree, self.tmp) - - def _fetch_distro(self, kind, conf=None): - cls = distros.fetch(kind) - paths = helpers.Paths({'cloud_dir': self.tmp}) - conf = {} if conf is None else conf - return cls(kind, conf, paths) - - def call_resolv_conf_handler(self, distro_name, conf, cc=None): - if not cc: - ds = None - distro = self._fetch_distro(distro_name, conf) - paths = helpers.Paths({'cloud_dir': self.tmp}) - cc = cloud.Cloud(ds, paths, {}, distro, None) - cc_resolv_conf.handle('cc_resolv_conf', conf, cc, LOG, []) - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_systemd_resolved(self, m_render_to_file): - self.call_resolv_conf_handler('photon', self.cfg) - - assert [ - mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) - ] == m_render_to_file.call_args_list - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_no_param(self, m_render_to_file): - tmp = deepcopy(self.cfg) - self.logs.truncate(0) - tmp.pop('resolv_conf') - self.call_resolv_conf_handler('photon', tmp) - - self.assertIn('manage_resolv_conf True but no parameters provided', - self.logs.getvalue()) - assert [ - mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) - ] not in m_render_to_file.call_args_list - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_manage_resolv_conf_false(self, m_render_to_file): - tmp = deepcopy(self.cfg) - self.logs.truncate(0) - tmp['manage_resolv_conf'] = False - self.call_resolv_conf_handler('photon', tmp) - self.assertIn("'manage_resolv_conf' present but set to False", - self.logs.getvalue()) - assert [ - mock.call(mock.ANY, '/etc/systemd/resolved.conf', mock.ANY) - ] not in m_render_to_file.call_args_list - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_etc_resolv_conf(self, m_render_to_file): - self.call_resolv_conf_handler('rhel', self.cfg) - - assert [ - mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) - ] == m_render_to_file.call_args_list - - @mock.patch("cloudinit.config.cc_resolv_conf.templater.render_to_file") - def test_resolv_conf_invalid_resolve_conf_fn(self, m_render_to_file): - ds = None - distro = self._fetch_distro('rhel', self.cfg) - paths = helpers.Paths({'cloud_dir': self.tmp}) - cc = cloud.Cloud(ds, paths, {}, distro, None) - cc.distro.resolve_conf_fn = 'bla' - - self.logs.truncate(0) - self.call_resolv_conf_handler('rhel', self.cfg, cc) - - self.assertIn('No template found, not rendering resolve configs', - self.logs.getvalue()) - - assert [ - mock.call(mock.ANY, '/etc/resolv.conf', mock.ANY) - ] not in m_render_to_file.call_args_list - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py deleted file mode 100644 index 
8c8e2838..00000000 --- a/tests/unittests/test_handler/test_handler_rsyslog.py +++ /dev/null @@ -1,178 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import os -import shutil -import tempfile - -from cloudinit.config.cc_rsyslog import ( - apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config, - parse_remotes_line, remotes_to_rsyslog_cfg) -from cloudinit import util - -from cloudinit.tests import helpers as t_help - - -class TestLoadConfig(t_help.TestCase): - def setUp(self): - super(TestLoadConfig, self).setUp() - self.basecfg = { - 'config_filename': DEF_FILENAME, - 'config_dir': DEF_DIR, - 'service_reload_command': DEF_RELOAD, - 'configs': [], - 'remotes': {}, - } - - def test_legacy_full(self): - found = load_config({ - 'rsyslog': ['*.* @192.168.1.1'], - 'rsyslog_dir': "mydir", - 'rsyslog_filename': "myfilename"}) - self.basecfg.update({ - 'configs': ['*.* @192.168.1.1'], - 'config_dir': "mydir", - 'config_filename': 'myfilename', - 'service_reload_command': 'auto'} - ) - - self.assertEqual(found, self.basecfg) - - def test_legacy_defaults(self): - found = load_config({ - 'rsyslog': ['*.* @192.168.1.1']}) - self.basecfg.update({ - 'configs': ['*.* @192.168.1.1']}) - self.assertEqual(found, self.basecfg) - - def test_new_defaults(self): - self.assertEqual(load_config({}), self.basecfg) - - def test_new_configs(self): - cfgs = ['*.* myhost', '*.* my2host'] - self.basecfg.update({'configs': cfgs}) - self.assertEqual( - load_config({'rsyslog': {'configs': cfgs}}), - self.basecfg) - - -class TestApplyChanges(t_help.TestCase): - def setUp(self): - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def test_simple(self): - cfgline = "*.* foohost" - changed = apply_rsyslog_changes( - configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp) - - fname = os.path.join(self.tmp, "foo.cfg") - self.assertEqual([fname], changed) - self.assertEqual( - util.load_file(fname), cfgline + "\n") - - def test_multiple_files(self): - configs = [ - '*.* foohost', - {'content': 'abc', 'filename': 'my.cfg'}, - {'content': 'filefoo-content', - 'filename': os.path.join(self.tmp, 'mydir/mycfg')}, - ] - - changed = apply_rsyslog_changes( - configs=configs, def_fname="default.cfg", cfg_dir=self.tmp) - - expected = [ - (os.path.join(self.tmp, "default.cfg"), - "*.* foohost\n"), - (os.path.join(self.tmp, "my.cfg"), "abc\n"), - (os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"), - ] - self.assertEqual([f[0] for f in expected], changed) - actual = [] - for fname, _content in expected: - util.load_file(fname) - actual.append((fname, util.load_file(fname),)) - self.assertEqual(expected, actual) - - def test_repeat_def(self): - configs = ['*.* foohost', "*.warn otherhost"] - - changed = apply_rsyslog_changes( - configs=configs, def_fname="default.cfg", cfg_dir=self.tmp) - - fname = os.path.join(self.tmp, "default.cfg") - self.assertEqual([fname], changed) - - expected_content = '\n'.join([c for c in configs]) + '\n' - found_content = util.load_file(fname) - self.assertEqual(expected_content, found_content) - - def test_multiline_content(self): - configs = ['line1', 'line2\nline3\n'] - - apply_rsyslog_changes( - configs=configs, def_fname="default.cfg", cfg_dir=self.tmp) - - fname = os.path.join(self.tmp, "default.cfg") - expected_content = '\n'.join([c for c in configs]) - found_content = util.load_file(fname) - self.assertEqual(expected_content, found_content) - - -class TestParseRemotesLine(t_help.TestCase): - def 
test_valid_port(self): - r = parse_remotes_line("foo:9") - self.assertEqual(9, r.port) - - def test_invalid_port(self): - with self.assertRaises(ValueError): - parse_remotes_line("*.* foo:abc") - - def test_valid_ipv6(self): - r = parse_remotes_line("*.* [::1]") - self.assertEqual("*.* @[::1]", str(r)) - - def test_valid_ipv6_with_port(self): - r = parse_remotes_line("*.* [::1]:100") - self.assertEqual(r.port, 100) - self.assertEqual(r.addr, "::1") - self.assertEqual("*.* @[::1]:100", str(r)) - - def test_invalid_multiple_colon(self): - with self.assertRaises(ValueError): - parse_remotes_line("*.* ::1:100") - - def test_name_in_string(self): - r = parse_remotes_line("syslog.host", name="foobar") - self.assertEqual("*.* @syslog.host # foobar", str(r)) - - -class TestRemotesToSyslog(t_help.TestCase): - def test_simple(self): - # str rendered line must appear in remotes_to_rsyslog_cfg return - mycfg = "*.* myhost" - myline = str(parse_remotes_line(mycfg, name="myname")) - r = remotes_to_rsyslog_cfg({'myname': mycfg}) - lines = r.splitlines() - self.assertEqual(1, len(lines)) - self.assertTrue(myline in r.splitlines()) - - def test_header_footer(self): - header = "#foo head" - footer = "#foo foot" - r = remotes_to_rsyslog_cfg( - {'myname': "*.* myhost"}, header=header, footer=footer) - lines = r.splitlines() - self.assertEqual(header, lines[0]) - self.assertEqual(footer, lines[-1]) - - def test_with_empty_or_null(self): - mycfg = "*.* myhost" - myline = str(parse_remotes_line(mycfg, name="myname")) - r = remotes_to_rsyslog_cfg( - {'myname': mycfg, 'removed': None, 'removed2': ""}) - lines = r.splitlines() - self.assertEqual(1, len(lines)) - self.assertTrue(myline in r.splitlines()) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_runcmd.py b/tests/unittests/test_handler/test_handler_runcmd.py deleted file mode 100644 index 672e8093..00000000 --- a/tests/unittests/test_handler/test_handler_runcmd.py +++ /dev/null @@ -1,129 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
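The parse_remotes_line assertions above fix the rendered line format: selector, '@' forwarding marker, bracketed IPv6 addresses, optional ':port' suffix and '# name' comment. A hedged rendering sketch matching those strings, not the cc_rsyslog parser itself:

    def render_remote(selector, addr, port=None, name=None):
        # IPv6 addresses are bracketed so the ':port' suffix stays
        # unambiguous; a name becomes a trailing '# name' comment.
        host = '[%s]' % addr if ':' in addr else addr
        line = '%s @%s' % (selector, host)
        if port:
            line += ':%d' % port
        if name:
            line += ' # %s' % name
        return line

    assert render_remote('*.*', '::1', 100) == '*.* @[::1]:100'
    assert render_remote('*.*', 'syslog.host', name='foobar') == (
        '*.* @syslog.host # foobar')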
-import logging -import os -import stat -from unittest.mock import patch - -from cloudinit.config.cc_runcmd import handle, schema -from cloudinit import (helpers, subp, util) -from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, SchemaTestCaseMixin, - skipUnlessJsonSchema) - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -class TestRuncmd(FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestRuncmd, self).setUp() - self.subp = subp.subp - self.new_root = self.tmp_dir() - self.patchUtils(self.new_root) - self.paths = helpers.Paths({'scripts': self.new_root}) - - def test_handler_skip_if_no_runcmd(self): - """When the provided config doesn't contain runcmd, skip it.""" - cfg = {} - mycloud = get_cloud(paths=self.paths) - handle('notimportant', cfg, mycloud, LOG, None) - self.assertIn( - "Skipping module named notimportant, no 'runcmd' key", - self.logs.getvalue()) - - @patch('cloudinit.util.shellify') - def test_runcmd_shellify_fails(self, cls): - """When shellify fails throw exception""" - cls.side_effect = TypeError("patched shellify") - valid_config = {'runcmd': ['echo 42']} - cc = get_cloud(paths=self.paths) - with self.assertRaises(TypeError) as cm: - with self.allow_subp(['/bin/sh']): - handle('cc_runcmd', valid_config, cc, LOG, None) - self.assertIn("Failed to shellify", str(cm.exception)) - - def test_handler_invalid_command_set(self): - """Commands which can't be converted to shell will raise errors.""" - invalid_config = {'runcmd': 1} - cc = get_cloud(paths=self.paths) - with self.assertRaises(TypeError) as cm: - handle('cc_runcmd', invalid_config, cc, LOG, []) - self.assertIn( - 'Failed to shellify 1 into file' - ' /var/lib/cloud/instances/iid-datasource-none/scripts/runcmd', - str(cm.exception)) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_type(self): - """Schema validation warns of non-array type for runcmd key. - - Schema validation is not strict, so runcmd attempts to shellify the - invalid content. - """ - invalid_config = {'runcmd': 1} - cc = get_cloud(paths=self.paths) - with self.assertRaises(TypeError) as cm: - handle('cc_runcmd', invalid_config, cc, LOG, []) - self.assertIn( - 'Invalid config:\nruncmd: 1 is not of type \'array\'', - self.logs.getvalue()) - self.assertIn('Failed to shellify', str(cm.exception)) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_item_type(self): - """Schema validation warns of non-array or string runcmd items. - - Schema validation is not strict, so runcmd attempts to shellify the - invalid content. 
- """ - invalid_config = { - 'runcmd': ['ls /', 20, ['wget', 'http://stuff/blah'], {'a': 'n'}]} - cc = get_cloud(paths=self.paths) - with self.assertRaises(TypeError) as cm: - handle('cc_runcmd', invalid_config, cc, LOG, []) - expected_warnings = [ - 'runcmd.1: 20 is not valid under any of the given schemas', - 'runcmd.3: {\'a\': \'n\'} is not valid under any of the given' - ' schema' - ] - logs = self.logs.getvalue() - for warning in expected_warnings: - self.assertIn(warning, logs) - self.assertIn('Failed to shellify', str(cm.exception)) - - def test_handler_write_valid_runcmd_schema_to_file(self): - """Valid runcmd schema is written to a runcmd shell script.""" - valid_config = {'runcmd': [['ls', '/']]} - cc = get_cloud(paths=self.paths) - handle('cc_runcmd', valid_config, cc, LOG, []) - runcmd_file = os.path.join( - self.new_root, - 'var/lib/cloud/instances/iid-datasource-none/scripts/runcmd') - self.assertEqual("#!/bin/sh\n'ls' '/'\n", util.load_file(runcmd_file)) - file_stat = os.stat(runcmd_file) - self.assertEqual(0o700, stat.S_IMODE(file_stat.st_mode)) - - -@skipUnlessJsonSchema() -class TestSchema(CiTestCase, SchemaTestCaseMixin): - """Directly test schema rather than through handle.""" - - schema = schema - - def test_duplicates_are_fine_array_array(self): - """Duplicated commands array/array entries are allowed.""" - self.assertSchemaValid( - [["echo", "bye"], ["echo", "bye"]], - "command entries can be duplicate.") - - def test_duplicates_are_fine_array_string(self): - """Duplicated commands array/string entries are allowed.""" - self.assertSchemaValid( - ["echo bye", "echo bye"], - "command entries can be duplicate.") - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py deleted file mode 100644 index 2ab153d2..00000000 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ /dev/null @@ -1,205 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# -# Author: Juerg Haefliger -# -# Based on test_handler_set_hostname.py -# -# This file is part of cloud-init. See LICENSE file for license information. 
-import gzip -import logging -import tempfile -from io import BytesIO - -from cloudinit import subp -from cloudinit import util -from cloudinit.config import cc_seed_random -from cloudinit.tests import helpers as t_help - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -class TestRandomSeed(t_help.TestCase): - def setUp(self): - super(TestRandomSeed, self).setUp() - self._seed_file = tempfile.mktemp() - self.unapply = [] - - # by default 'which' has nothing in its path - self.apply_patches([(subp, 'which', self._which)]) - self.apply_patches([(subp, 'subp', self._subp)]) - self.subp_called = [] - self.whichdata = {} - - def tearDown(self): - apply_patches([i for i in reversed(self.unapply)]) - util.del_file(self._seed_file) - - def apply_patches(self, patches): - ret = apply_patches(patches) - self.unapply += ret - - def _which(self, program): - return self.whichdata.get(program) - - def _subp(self, *args, **kwargs): - # supports subp calling with cmd as args or kwargs - if 'args' not in kwargs: - kwargs['args'] = args[0] - self.subp_called.append(kwargs) - return - - def _compress(self, text): - contents = BytesIO() - gz_fh = gzip.GzipFile(mode='wb', fileobj=contents) - gz_fh.write(text) - gz_fh.close() - return contents.getvalue() - - def test_append_random(self): - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': 'tiny-tim-was-here', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("tiny-tim-was-here", contents) - - def test_append_random_unknown_encoding(self): - data = self._compress(b"tiny-toe") - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'special_encoding', - } - } - self.assertRaises(IOError, cc_seed_random.handle, 'test', cfg, - get_cloud('ubuntu'), LOG, []) - - def test_append_random_gzip(self): - data = self._compress(b"tiny-toe") - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'gzip', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("tiny-toe", contents) - - def test_append_random_gz(self): - data = self._compress(b"big-toe") - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'gz', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("big-toe", contents) - - def test_append_random_base64(self): - data = util.b64e('bubbles') - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'base64', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("bubbles", contents) - - def test_append_random_b64(self): - data = util.b64e('kit-kat') - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': data, - 'encoding': 'b64', - } - } - cc_seed_random.handle('test', cfg, get_cloud('ubuntu'), LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual("kit-kat", contents) - - def test_append_random_metadata(self): - cfg = { - 'random_seed': { - 'file': self._seed_file, - 'data': 'tiny-tim-was-here', - } - } - c = get_cloud('ubuntu', metadata={'random_seed': '-so-was-josh'}) - cc_seed_random.handle('test', cfg, c, LOG, []) - contents = util.load_file(self._seed_file) - self.assertEqual('tiny-tim-was-here-so-was-josh', contents) - - def 
test_seed_command_provided_and_available(self): - c = get_cloud('ubuntu') - self.whichdata = {'pollinate': '/usr/bin/pollinate'} - cfg = {'random_seed': {'command': ['pollinate', '-q']}} - cc_seed_random.handle('test', cfg, c, LOG, []) - - subp_args = [f['args'] for f in self.subp_called] - self.assertIn(['pollinate', '-q'], subp_args) - - def test_seed_command_not_provided(self): - c = get_cloud('ubuntu') - self.whichdata = {} - cc_seed_random.handle('test', {}, c, LOG, []) - - # subp should not have been called as which would say not available - self.assertFalse(self.subp_called) - - def test_unavailable_seed_command_and_required_raises_error(self): - c = get_cloud('ubuntu') - self.whichdata = {} - cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'], - 'command_required': True}} - self.assertRaises(ValueError, cc_seed_random.handle, - 'test', cfg, c, LOG, []) - - def test_seed_command_and_required(self): - c = get_cloud('ubuntu') - self.whichdata = {'foo': 'foo'} - cfg = {'random_seed': {'command_required': True, 'command': ['foo']}} - cc_seed_random.handle('test', cfg, c, LOG, []) - - self.assertIn(['foo'], [f['args'] for f in self.subp_called]) - - def test_file_in_environment_for_command(self): - c = get_cloud('ubuntu') - self.whichdata = {'foo': 'foo'} - cfg = {'random_seed': {'command_required': True, 'command': ['foo'], - 'file': self._seed_file}} - cc_seed_random.handle('test', cfg, c, LOG, []) - - # this just insists that the first time subp was called, - # RANDOM_SEED_FILE was in the environment set up correctly - subp_env = [f['env'] for f in self.subp_called] - self.assertEqual(subp_env[0].get('RANDOM_SEED_FILE'), self._seed_file) - - -def apply_patches(patches): - ret = [] - for (ref, name, replace) in patches: - if replace is None: - continue - orig = getattr(ref, name) - setattr(ref, name, replace) - ret.append((ref, name, orig)) - return ret - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py deleted file mode 100644 index 1a524c7d..00000000 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ /dev/null @@ -1,207 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information.
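The random_seed encodings exercised above ('gz'/'gzip', 'base64'/'b64', raw, and an unknown encoding that must fail) decode mechanically; a hedged equivalent of that alias table, not the cc_seed_random source:

    import base64
    import gzip
    import io

    def decode_seed(data, encoding=''):
        # alias groups mirror the deleted tests; unknown encodings are
        # rejected (the tests above expect an IOError there)
        if encoding in ('gz', 'gzip'):
            return gzip.GzipFile(fileobj=io.BytesIO(data)).read()
        if encoding in ('base64', 'b64'):
            return base64.b64decode(data)
        if encoding in ('', 'raw'):
            return data
        raise IOError('unknown random_seed encoding: %s' % encoding)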
- -from cloudinit.config import cc_set_hostname - -from cloudinit import cloud -from cloudinit import distros -from cloudinit import helpers -from cloudinit import util - -from cloudinit.tests import helpers as t_help - -from configobj import ConfigObj -import logging -import os -import shutil -import tempfile -from io import BytesIO -from unittest import mock - -LOG = logging.getLogger(__name__) - - -class TestHostname(t_help.FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestHostname, self).setUp() - self.tmp = tempfile.mkdtemp() - util.ensure_dir(os.path.join(self.tmp, 'data')) - self.addCleanup(shutil.rmtree, self.tmp) - - def _fetch_distro(self, kind, conf=None): - cls = distros.fetch(kind) - paths = helpers.Paths({'cloud_dir': self.tmp}) - conf = {} if conf is None else conf - return cls(kind, conf, paths) - - def test_debian_write_hostname_prefer_fqdn(self): - cfg = { - 'hostname': 'blah', - 'prefer_fqdn_over_hostname': True, - 'fqdn': 'blah.yahoo.com', - } - distro = self._fetch_distro('debian', cfg) - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) - contents = util.load_file("/etc/hostname") - self.assertEqual('blah.yahoo.com', contents.strip()) - - @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) - def test_rhel_write_hostname_prefer_hostname(self, m_uses_systemd): - cfg = { - 'hostname': 'blah', - 'prefer_fqdn_over_hostname': False, - 'fqdn': 'blah.yahoo.com', - } - distro = self._fetch_distro('rhel', cfg) - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) - contents = util.load_file("/etc/sysconfig/network", decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual( - {'HOSTNAME': 'blah'}, - dict(n_cfg)) - - @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) - def test_write_hostname_rhel(self, m_uses_systemd): - cfg = { - 'hostname': 'blah', - 'fqdn': 'blah.blah.blah.yahoo.com' - } - distro = self._fetch_distro('rhel') - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) - contents = util.load_file("/etc/sysconfig/network", decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual( - {'HOSTNAME': 'blah.blah.blah.yahoo.com'}, - dict(n_cfg)) - - def test_write_hostname_debian(self): - cfg = { - 'hostname': 'blah', - 'fqdn': 'blah.blah.blah.yahoo.com', - } - distro = self._fetch_distro('debian') - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', - cfg, cc, LOG, []) - contents = util.load_file("/etc/hostname") - self.assertEqual('blah', contents.strip()) - - @mock.patch('cloudinit.distros.Distro.uses_systemd', return_value=False) - def test_write_hostname_sles(self, m_uses_systemd): - cfg = { - 'hostname': 'blah.blah.blah.suse.com', - } - distro = self._fetch_distro('sles') - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) - contents = 
util.load_file(distro.hostname_conf_fn) - self.assertEqual('blah', contents.strip()) - - @mock.patch('cloudinit.distros.photon.subp.subp') - def test_photon_hostname(self, m_subp): - cfg1 = { - 'hostname': 'photon', - 'prefer_fqdn_over_hostname': True, - 'fqdn': 'test1.vmware.com', - } - cfg2 = { - 'hostname': 'photon', - 'prefer_fqdn_over_hostname': False, - 'fqdn': 'test2.vmware.com', - } - - ds = None - m_subp.return_value = (None, None) - distro = self._fetch_distro('photon', cfg1) - paths = helpers.Paths({'cloud_dir': self.tmp}) - cc = cloud.Cloud(ds, paths, {}, distro, None) - for c in [cfg1, cfg2]: - cc_set_hostname.handle('cc_set_hostname', c, cc, LOG, []) - print("\n", m_subp.call_args_list) - if c['prefer_fqdn_over_hostname']: - assert [ - mock.call(['hostnamectl', 'set-hostname', c['fqdn']], - capture=True) - ] in m_subp.call_args_list - assert [ - mock.call(['hostnamectl', 'set-hostname', c['hostname']], - capture=True) - ] not in m_subp.call_args_list - else: - assert [ - mock.call(['hostnamectl', 'set-hostname', c['hostname']], - capture=True) - ] in m_subp.call_args_list - assert [ - mock.call(['hostnamectl', 'set-hostname', c['fqdn']], - capture=True) - ] not in m_subp.call_args_list - - def test_multiple_calls_skips_unchanged_hostname(self): - """Only new hostname or fqdn values will generate a hostname call.""" - distro = self._fetch_distro('debian') - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - cc_set_hostname.handle( - 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) - contents = util.load_file("/etc/hostname") - self.assertEqual('hostname1', contents.strip()) - cc_set_hostname.handle( - 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) - self.assertIn( - 'DEBUG: No hostname changes. Skipping set-hostname\n', - self.logs.getvalue()) - cc_set_hostname.handle( - 'cc_set_hostname', {'hostname': 'hostname2.me.com'}, cc, LOG, []) - contents = util.load_file("/etc/hostname") - self.assertEqual('hostname2', contents.strip()) - self.assertIn( - 'Non-persistently setting the system hostname to hostname2', - self.logs.getvalue()) - - def test_error_on_distro_set_hostname_errors(self): - """Raise SetHostnameError on exceptions from distro.set_hostname.""" - distro = self._fetch_distro('debian') - - def set_hostname_error(hostname, fqdn): - raise Exception("OOPS on: %s" % fqdn) - - distro.set_hostname = set_hostname_error - paths = helpers.Paths({'cloud_dir': self.tmp}) - ds = None - cc = cloud.Cloud(ds, paths, {}, distro, None) - self.patchUtils(self.tmp) - with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr: - cc_set_hostname.handle( - 'somename', {'hostname': 'hostname1.me.com'}, cc, LOG, []) - self.assertEqual( - 'Failed to set the hostname to hostname1.me.com (hostname1):' - ' OOPS on: hostname1.me.com', - str(ctx_mgr.exception)) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_spacewalk.py b/tests/unittests/test_handler/test_handler_spacewalk.py deleted file mode 100644 index 26f7648f..00000000 --- a/tests/unittests/test_handler/test_handler_spacewalk.py +++ /dev/null @@ -1,42 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
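The hostname tests above all turn on one selection rule, with the distro-specific file writing layered on top. Sketched under the assumption that only 'hostname', 'fqdn' and 'prefer_fqdn_over_hostname' matter (a stand-in for the cc_set_hostname/distro plumbing):

    def pick_hostname(cfg):
        # prefer_fqdn_over_hostname picks which value gets applied
        if cfg.get('prefer_fqdn_over_hostname') and cfg.get('fqdn'):
            return cfg['fqdn']
        return cfg.get('hostname') or cfg.get('fqdn')

    assert pick_hostname({'hostname': 'blah', 'fqdn': 'blah.yahoo.com',
                          'prefer_fqdn_over_hostname': True}) == 'blah.yahoo.com'
    assert pick_hostname({'hostname': 'blah', 'fqdn': 'blah.yahoo.com',
                          'prefer_fqdn_over_hostname': False}) == 'blah'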
- -from cloudinit.config import cc_spacewalk -from cloudinit import subp - -from cloudinit.tests import helpers - -import logging -from unittest import mock - -LOG = logging.getLogger(__name__) - - -class TestSpacewalk(helpers.TestCase): - space_cfg = { - 'spacewalk': { - 'server': 'localhost', - 'profile_name': 'test', - } - } - - @mock.patch("cloudinit.config.cc_spacewalk.subp.subp") - def test_not_is_registered(self, mock_subp): - mock_subp.side_effect = subp.ProcessExecutionError(exit_code=1) - self.assertFalse(cc_spacewalk.is_registered()) - - @mock.patch("cloudinit.config.cc_spacewalk.subp.subp") - def test_is_registered(self, mock_subp): - mock_subp.side_effect = None - self.assertTrue(cc_spacewalk.is_registered()) - - @mock.patch("cloudinit.config.cc_spacewalk.subp.subp") - def test_do_register(self, mock_subp): - cc_spacewalk.do_register(**self.space_cfg['spacewalk']) - mock_subp.assert_called_with([ - 'rhnreg_ks', - '--serverUrl', 'https://localhost/XMLRPC', - '--profilename', 'test', - '--sslCACert', cc_spacewalk.def_ca_cert_path, - ], capture=False) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py deleted file mode 100644 index 77cdb0c2..00000000 --- a/tests/unittests/test_handler/test_handler_timezone.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2013 Hewlett-Packard Development Company, L.P. -# -# Author: Juerg Haefliger -# -# This file is part of cloud-init. See LICENSE file for license information. - -from cloudinit.config import cc_timezone - -from cloudinit import util - - -import logging -import shutil -import tempfile -from configobj import ConfigObj -from io import BytesIO - -from cloudinit.tests import helpers as t_help - -from tests.unittests.util import get_cloud - -LOG = logging.getLogger(__name__) - - -class TestTimezone(t_help.FilesystemMockingTestCase): - def setUp(self): - super(TestTimezone, self).setUp() - self.new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.new_root) - self.patchUtils(self.new_root) - self.patchOS(self.new_root) - - def test_set_timezone_sles(self): - - cfg = { - 'timezone': 'Tatooine/Bestine', - } - cc = get_cloud('sles') - - # Create a dummy timezone file - dummy_contents = '0123456789abcdefgh' - util.write_file('/usr/share/zoneinfo/%s' % cfg['timezone'], - dummy_contents) - - cc_timezone.handle('cc_timezone', cfg, cc, LOG, []) - - contents = util.load_file('/etc/sysconfig/clock', decode=False) - n_cfg = ConfigObj(BytesIO(contents)) - self.assertEqual({'TIMEZONE': cfg['timezone']}, dict(n_cfg)) - - contents = util.load_file('/etc/localtime') - self.assertEqual(dummy_contents, contents.strip()) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_write_files.py b/tests/unittests/test_handler/test_handler_write_files.py deleted file mode 100644 index 0af92805..00000000 --- a/tests/unittests/test_handler/test_handler_write_files.py +++ /dev/null @@ -1,246 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
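The spacewalk test above pins the exact rhnreg_ks invocation; a command builder assumed equivalent (the ca_cert default here is a guess at cc_spacewalk.def_ca_cert_path, which the test references but this diff does not show):

    def rhnreg_cmd(server, profile_name,
                   ca_cert='/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT'):
        # argv shape copied from the deleted assertion above; the
        # ca_cert default is an assumption, not the module constant
        return ['rhnreg_ks',
                '--serverUrl', 'https://%s/XMLRPC' % server,
                '--profilename', profile_name,
                '--sslCACert', ca_cert]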
- -import base64 -import copy -import gzip -import io -import shutil -import tempfile - -from cloudinit.config.cc_write_files import ( - handle, decode_perms, write_files) -from cloudinit import log as logging -from cloudinit import util - -from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) - -LOG = logging.getLogger(__name__) - -YAML_TEXT = """ -write_files: - - encoding: gzip - content: !!binary | - H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA= - path: /usr/bin/hello - permissions: '0755' - - content: !!binary | - Zm9vYmFyCg== - path: /wark - permissions: '0755' - - content: | - hi mom line 1 - hi mom line 2 - path: /tmp/message -""" - -YAML_CONTENT_EXPECTED = { - '/usr/bin/hello': "#!/bin/sh\necho hello world\n", - '/wark': "foobar\n", - '/tmp/message': "hi mom line 1\nhi mom line 2\n", -} - -VALID_SCHEMA = { - 'write_files': [ - {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff', - 'path': '/some', 'permissions': '0777'} - ] -} - -INVALID_SCHEMA = { # Dropped required path key - 'write_files': [ - {'append': False, 'content': 'a', 'encoding': 'gzip', 'owner': 'jeff', - 'permissions': '0777'} - ] -} - - -@skipUnlessJsonSchema() -@mock.patch('cloudinit.config.cc_write_files.write_files') -class TestWriteFilesSchema(CiTestCase): - - with_logs = True - - def test_schema_validation_warns_missing_path(self, m_write_files): - """The only required file item property is 'path'.""" - cc = self.tmp_cloud('ubuntu') - valid_config = {'write_files': [{'path': '/some/path'}]} - handle('cc_write_file', valid_config, cc, LOG, []) - self.assertNotIn('Invalid config:', self.logs.getvalue()) - handle('cc_write_file', INVALID_SCHEMA, cc, LOG, []) - self.assertIn('Invalid config:', self.logs.getvalue()) - self.assertIn("'path' is a required property", self.logs.getvalue()) - - def test_schema_validation_warns_non_string_type_for_files( - self, m_write_files): - """Schema validation warns of non-string values for each file item.""" - cc = self.tmp_cloud('ubuntu') - for key in VALID_SCHEMA['write_files'][0].keys(): - if key == 'append': - key_type = 'boolean' - else: - key_type = 'string' - invalid_config = copy.deepcopy(VALID_SCHEMA) - invalid_config['write_files'][0][key] = 1 - handle('cc_write_file', invalid_config, cc, LOG, []) - self.assertIn( - mock.call('cc_write_file', invalid_config['write_files']), - m_write_files.call_args_list) - self.assertIn( - 'write_files.0.%s: 1 is not of type \'%s\'' % (key, key_type), - self.logs.getvalue()) - self.assertIn('Invalid config:', self.logs.getvalue()) - - def test_schema_validation_warns_on_additional_undefined_propertes( - self, m_write_files): - """Schema validation warns on additional undefined file properties.""" - cc = self.tmp_cloud('ubuntu') - invalid_config = copy.deepcopy(VALID_SCHEMA) - invalid_config['write_files'][0]['bogus'] = 'value' - handle('cc_write_file', invalid_config, cc, LOG, []) - self.assertIn( - "Invalid config:\nwrite_files.0: Additional properties" - " are not allowed ('bogus' was unexpected)", - self.logs.getvalue()) - - -class TestWriteFiles(FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestWriteFiles, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - @skipUnlessJsonSchema() - def test_handler_schema_validation_warns_non_array_type(self): - """Schema validation warns of non-array value.""" - invalid_config = {'write_files': 1} - cc = self.tmp_cloud('ubuntu') - with 
self.assertRaises(TypeError): - handle('cc_write_file', invalid_config, cc, LOG, []) - self.assertIn( - 'Invalid config:\nwrite_files: 1 is not of type \'array\'', - self.logs.getvalue()) - - def test_simple(self): - self.patchUtils(self.tmp) - expected = "hello world\n" - filename = "/tmp/my.file" - write_files( - "test_simple", [{"content": expected, "path": filename}]) - self.assertEqual(util.load_file(filename), expected) - - def test_append(self): - self.patchUtils(self.tmp) - existing = "hello " - added = "world\n" - expected = existing + added - filename = "/tmp/append.file" - util.write_file(filename, existing) - write_files( - "test_append", - [{"content": added, "path": filename, "append": "true"}]) - self.assertEqual(util.load_file(filename), expected) - - def test_yaml_binary(self): - self.patchUtils(self.tmp) - data = util.load_yaml(YAML_TEXT) - write_files("testname", data['write_files']) - for path, content in YAML_CONTENT_EXPECTED.items(): - self.assertEqual(util.load_file(path), content) - - def test_all_decodings(self): - self.patchUtils(self.tmp) - - # build a 'files' array that has a dictionary of encodings - # for 'gz', 'gzip', 'gz+base64' ... - data = b"foobzr" - utf8_valid = b"foobzr" - utf8_invalid = b'ab\xaadef' - files = [] - expected = [] - - gz_aliases = ('gz', 'gzip') - gz_b64_aliases = ('gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64') - b64_aliases = ('base64', 'b64') - - datum = (("utf8", utf8_valid), ("no-utf8", utf8_invalid)) - for name, data in datum: - gz = (_gzip_bytes(data), gz_aliases) - gz_b64 = (base64.b64encode(_gzip_bytes(data)), gz_b64_aliases) - b64 = (base64.b64encode(data), b64_aliases) - for content, aliases in (gz, gz_b64, b64): - for enc in aliases: - cur = {'content': content, - 'path': '/tmp/file-%s-%s' % (name, enc), - 'encoding': enc} - files.append(cur) - expected.append((cur['path'], data)) - - write_files("test_decoding", files) - - for path, content in expected: - self.assertEqual(util.load_file(path, decode=False), content) - - # make sure we actually wrote *some* files. 
- flen_expected = ( - len(gz_aliases + gz_b64_aliases + b64_aliases) * len(datum)) - self.assertEqual(len(expected), flen_expected) - - def test_deferred(self): - self.patchUtils(self.tmp) - file_path = '/tmp/deferred.file' - config = { - 'write_files': [ - {'path': file_path, 'defer': True} - ] - } - cc = self.tmp_cloud('ubuntu') - handle('cc_write_file', config, cc, LOG, []) - with self.assertRaises(FileNotFoundError): - util.load_file(file_path) - - -class TestDecodePerms(CiTestCase): - - with_logs = True - - def test_none_returns_default(self): - """If None is passed as perms, then default should be returned.""" - default = object() - found = decode_perms(None, default) - self.assertEqual(default, found) - - def test_integer(self): - """A valid integer should return itself.""" - found = decode_perms(0o755, None) - self.assertEqual(0o755, found) - - def test_valid_octal_string(self): - """A string should be read as octal.""" - found = decode_perms("644", None) - self.assertEqual(0o644, found) - - def test_invalid_octal_string_returns_default_and_warns(self): - """A string with invalid octal should warn and return default.""" - found = decode_perms("999", None) - self.assertIsNone(found) - self.assertIn("WARNING: Undecodable", self.logs.getvalue()) - - -def _gzip_bytes(data): - buf = io.BytesIO() - fp = None - try: - fp = gzip.GzipFile(fileobj=buf, mode="wb") - fp.write(data) - fp.close() - return buf.getvalue() - finally: - if fp: - fp.close() - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_write_files_deferred.py b/tests/unittests/test_handler/test_handler_write_files_deferred.py deleted file mode 100644 index 57b6934a..00000000 --- a/tests/unittests/test_handler/test_handler_write_files_deferred.py +++ /dev/null @@ -1,77 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
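
test_all_decodings above enumerates every accepted write_files encoding alias. The decode step they all funnel into is small; a hedged sketch (the function name is illustrative, not the module's real helper, and content is assumed to be bytes for the compressed paths):

import base64
import gzip

def decode_content(content, encoding):
    """Decode a write_files 'content' value per its declared encoding."""
    if encoding in ('gz', 'gzip'):
        return gzip.decompress(content)
    if encoding in ('gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64'):
        return gzip.decompress(base64.b64decode(content))
    if encoding in ('base64', 'b64'):
        return base64.b64decode(content)
    return content  # plain text passes through untouched
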
- -import tempfile -import shutil - -from cloudinit.config.cc_write_files_deferred import (handle) -from .test_handler_write_files import (VALID_SCHEMA) -from cloudinit import log as logging -from cloudinit import util - -from cloudinit.tests.helpers import ( - CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) - -LOG = logging.getLogger(__name__) - - -@skipUnlessJsonSchema() -@mock.patch('cloudinit.config.cc_write_files_deferred.write_files') -class TestWriteFilesDeferredSchema(CiTestCase): - - with_logs = True - - def test_schema_validation_warns_invalid_value(self, - m_write_files_deferred): - """If 'defer' is defined, it must be of type 'bool'.""" - - valid_config = { - 'write_files': [ - {**VALID_SCHEMA.get('write_files')[0], 'defer': True} - ] - } - - invalid_config = { - 'write_files': [ - {**VALID_SCHEMA.get('write_files')[0], 'defer': str('no')} - ] - } - - cc = self.tmp_cloud('ubuntu') - handle('cc_write_files_deferred', valid_config, cc, LOG, []) - self.assertNotIn('Invalid config:', self.logs.getvalue()) - handle('cc_write_files_deferred', invalid_config, cc, LOG, []) - self.assertIn('Invalid config:', self.logs.getvalue()) - self.assertIn("defer: 'no' is not of type 'boolean'", - self.logs.getvalue()) - - -class TestWriteFilesDeferred(FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestWriteFilesDeferred, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def test_filtering_deferred_files(self): - self.patchUtils(self.tmp) - expected = "hello world\n" - config = { - 'write_files': [ - { - 'path': '/tmp/deferred.file', - 'defer': True, - 'content': expected - }, - {'path': '/tmp/not_deferred.file'} - ] - } - cc = self.tmp_cloud('ubuntu') - handle('cc_write_files_deferred', config, cc, LOG, []) - self.assertEqual(util.load_file('/tmp/deferred.file'), expected) - with self.assertRaises(FileNotFoundError): - util.load_file('/tmp/not_deferred.file') - - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py deleted file mode 100644 index 7c61bbf9..00000000 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ /dev/null @@ -1,111 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
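
The deferred-write tests above reduce to a filter: entries flagged defer: true are skipped on the first pass and written later by the deferred module. Roughly, with a hypothetical helper name:

def split_deferred(file_entries):
    """Partition write_files entries by their 'defer' flag."""
    immediate = [f for f in file_entries if not f.get('defer', False)]
    deferred = [f for f in file_entries if f.get('defer', False)]
    return immediate, deferred
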
- -import configparser -import logging -import shutil -import tempfile - -from cloudinit import util -from cloudinit.config import cc_yum_add_repo -from cloudinit.tests import helpers - -LOG = logging.getLogger(__name__) - - -class TestConfig(helpers.FilesystemMockingTestCase): - def setUp(self): - super(TestConfig, self).setUp() - self.tmp = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmp) - - def test_bad_config(self): - cfg = { - 'yum_repos': { - 'epel-testing': { - 'name': 'Extra Packages for Enterprise Linux 5 - Testing', - # Missing this should cause the repo not to be written - # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch', - 'enabled': False, - 'gpgcheck': True, - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', - 'failovermethod': 'priority', - }, - }, - } - self.patchUtils(self.tmp) - cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - self.assertRaises(IOError, util.load_file, - "/etc/yum.repos.d/epel_testing.repo") - - def test_write_config(self): - cfg = { - 'yum_repos': { - 'epel-testing': { - 'name': 'Extra Packages for Enterprise Linux 5 - Testing', - 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch', - 'enabled': False, - 'gpgcheck': True, - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', - 'failovermethod': 'priority', - }, - }, - } - self.patchUtils(self.tmp) - cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - contents = util.load_file("/etc/yum.repos.d/epel_testing.repo") - parser = configparser.ConfigParser() - parser.read_string(contents) - expected = { - 'epel_testing': { - 'name': 'Extra Packages for Enterprise Linux 5 - Testing', - 'failovermethod': 'priority', - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL', - 'enabled': '0', - 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch', - 'gpgcheck': '1', - } - } - for section in expected: - self.assertTrue(parser.has_section(section), - "Contains section {0}".format(section)) - for k, v in expected[section].items(): - self.assertEqual(parser.get(section, k), v) - - def test_write_config_array(self): - cfg = { - 'yum_repos': { - 'puppetlabs-products': { - 'name': 'Puppet Labs Products El 6 - $basearch', - 'baseurl': - 'http://yum.puppetlabs.com/el/6/products/$basearch', - 'gpgkey': [ - 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs', - 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', - ], - 'enabled': True, - 'gpgcheck': True, - } - } - } - self.patchUtils(self.tmp) - cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo") - parser = configparser.ConfigParser() - parser.read_string(contents) - expected = { - 'puppetlabs_products': { - 'name': 'Puppet Labs Products El 6 - $basearch', - 'baseurl': 'http://yum.puppetlabs.com/el/6/products/$basearch', - 'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n' - 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet', - 'enabled': '1', - 'gpgcheck': '1', - } - } - for section in expected: - self.assertTrue(parser.has_section(section), - "Contains section {0}".format(section)) - for k, v in expected[section].items(): - self.assertEqual(parser.get(section, k), v) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_handler/test_handler_zypper_add_repo.py b/tests/unittests/test_handler/test_handler_zypper_add_repo.py deleted file mode 100644 index 0fb1de1a..00000000 --- a/tests/unittests/test_handler/test_handler_zypper_add_repo.py +++ /dev/null @@ -1,231 +0,0 @@ -# This file is part of cloud-init. 
See LICENSE file for license information.
-
-import configparser
-import glob
-import logging
-import os
-
-from cloudinit import util
-from cloudinit.config import cc_zypper_add_repo
-from cloudinit.tests import helpers
-from cloudinit.tests.helpers import mock
-
-LOG = logging.getLogger(__name__)
-
-
-class TestConfig(helpers.FilesystemMockingTestCase):
-    def setUp(self):
-        super(TestConfig, self).setUp()
-        self.tmp = self.tmp_dir()
-        self.zypp_conf = 'etc/zypp/zypp.conf'
-
-    def test_bad_repo_config(self):
-        """Config has no baseurl, no file should be written"""
-        cfg = {
-            'repos': [
-                {
-                    'id': 'foo',
-                    'name': 'suse-test',
-                    'enabled': '1'
-                },
-            ]
-        }
-        self.patchUtils(self.tmp)
-        cc_zypper_add_repo._write_repos(cfg['repos'], '/etc/zypp/repos.d')
-        self.assertRaises(IOError, util.load_file,
-                          "/etc/zypp/repos.d/foo.repo")
-
-    def test_write_repos(self):
-        """Verify valid repos get written"""
-        cfg = self._get_base_config_repos()
-        root_d = self.tmp_dir()
-        cc_zypper_add_repo._write_repos(cfg['zypper']['repos'], root_d)
-        repos = glob.glob('%s/*.repo' % root_d)
-        expected_repos = ['testing-foo.repo', 'testing-bar.repo']
-        if len(repos) != 2:
-            assert 'Number of repos written is "%d" expected 2' % len(repos)
-        for repo in repos:
-            repo_name = os.path.basename(repo)
-            if repo_name not in expected_repos:
-                assert 'Found repo with name "%s"; unexpected' % repo_name
-        # Validation that the content gets properly written is in another test
-
-    def test_write_repo(self):
-        """Verify the content of a repo file"""
-        cfg = {
-            'repos': [
-                {
-                    'baseurl': 'http://foo',
-                    'name': 'test-foo',
-                    'id': 'testing-foo'
-                },
-            ]
-        }
-        root_d = self.tmp_dir()
-        cc_zypper_add_repo._write_repos(cfg['repos'], root_d)
-        contents = util.load_file("%s/testing-foo.repo" % root_d)
-        parser = configparser.ConfigParser()
-        parser.read_string(contents)
-        expected = {
-            'testing-foo': {
-                'name': 'test-foo',
-                'baseurl': 'http://foo',
-                'enabled': '1',
-                'autorefresh': '1'
-            }
-        }
-        for section in expected:
-            self.assertTrue(parser.has_section(section),
-                            "Contains section {0}".format(section))
-            for k, v in expected[section].items():
-                self.assertEqual(parser.get(section, k), v)
-
-    def test_config_write(self):
-        """Write valid configuration data"""
-        cfg = {
-            'config': {
-                'download.deltarpm': 'False',
-                'reposdir': 'foo'
-            }
-        }
-        root_d = self.tmp_dir()
-        helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
-        self.reRoot(root_d)
-        cc_zypper_add_repo._write_zypp_config(cfg['config'])
-        cfg_out = os.path.join(root_d, self.zypp_conf)
-        contents = util.load_file(cfg_out)
-        expected = [
-            '# Zypp config',
-            '# Added via cloud.cfg',
-            'download.deltarpm=False',
-            'reposdir=foo'
-        ]
-        for item in contents.split('\n'):
-            if item not in expected:
-                self.assertIsNone(item)
-
-    @mock.patch('cloudinit.log.logging')
-    def test_config_write_skip_configdir(self, mock_logging):
-        """Write configuration but skip writing 'configdir' setting"""
-        cfg = {
-            'config': {
-                'download.deltarpm': 'False',
-                'reposdir': 'foo',
-                'configdir': 'bar'
-            }
-        }
-        root_d = self.tmp_dir()
-        helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'})
-        self.reRoot(root_d)
-        cc_zypper_add_repo._write_zypp_config(cfg['config'])
-        cfg_out = os.path.join(root_d, self.zypp_conf)
-        contents = util.load_file(cfg_out)
-        expected = [
-            '# Zypp config',
-            '# Added via cloud.cfg',
-            'download.deltarpm=False',
-            'reposdir=foo'
-        ]
-        for item in contents.split('\n'):
-            if item not in expected:
-                self.assertIsNone(item)
-        # Not finding the 
right path for mocking :( - # assert mock_logging.warning.called - - def test_empty_config_section_no_new_data(self): - """When the config section is empty no new data should be written to - zypp.conf""" - cfg = self._get_base_config_repos() - cfg['zypper']['config'] = None - root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# No data'}) - self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg.get('config', {})) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - self.assertEqual(contents, '# No data') - - def test_empty_config_value_no_new_data(self): - """When the config section is not empty but there are no values - no new data should be written to zypp.conf""" - cfg = self._get_base_config_repos() - cfg['zypper']['config'] = { - 'download.deltarpm': None - } - root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# No data'}) - self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg.get('config', {})) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - self.assertEqual(contents, '# No data') - - def test_handler_full_setup(self): - """Test that the handler ends up calling the renderers""" - cfg = self._get_base_config_repos() - cfg['zypper']['config'] = { - 'download.deltarpm': 'False', - } - root_d = self.tmp_dir() - os.makedirs('%s/etc/zypp/repos.d' % root_d) - helpers.populate_dir(root_d, {self.zypp_conf: '# Zypp config\n'}) - self.reRoot(root_d) - cc_zypper_add_repo.handle('zypper_add_repo', cfg, None, LOG, []) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - expected = [ - '# Zypp config', - '# Added via cloud.cfg', - 'download.deltarpm=False', - ] - for item in contents.split('\n'): - if item not in expected: - self.assertIsNone(item) - repos = glob.glob('%s/etc/zypp/repos.d/*.repo' % root_d) - expected_repos = ['testing-foo.repo', 'testing-bar.repo'] - if len(repos) != 2: - assert 'Number of repos written is "%d" expected 2' % len(repos) - for repo in repos: - repo_name = os.path.basename(repo) - if repo_name not in expected_repos: - assert 'Found repo with name "%s"; unexpected' % repo_name - - def test_no_config_section_no_new_data(self): - """When there is no config section no new data should be written to - zypp.conf""" - cfg = self._get_base_config_repos() - root_d = self.tmp_dir() - helpers.populate_dir(root_d, {self.zypp_conf: '# No data'}) - self.reRoot(root_d) - cc_zypper_add_repo._write_zypp_config(cfg.get('config', {})) - cfg_out = os.path.join(root_d, self.zypp_conf) - contents = util.load_file(cfg_out) - self.assertEqual(contents, '# No data') - - def test_no_repo_data(self): - """When there is no repo data nothing should happen""" - root_d = self.tmp_dir() - self.reRoot(root_d) - cc_zypper_add_repo._write_repos(None, root_d) - content = glob.glob('%s/*' % root_d) - self.assertEqual(len(content), 0) - - def _get_base_config_repos(self): - """Basic valid repo configuration""" - cfg = { - 'zypper': { - 'repos': [ - { - 'baseurl': 'http://foo', - 'name': 'test-foo', - 'id': 'testing-foo' - }, - { - 'baseurl': 'http://bar', - 'name': 'test-bar', - 'id': 'testing-bar' - } - ] - } - } - return cfg diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py deleted file mode 100644 index 1dae223d..00000000 --- a/tests/unittests/test_handler/test_schema.py +++ /dev/null @@ -1,515 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
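
For reference, the repo definitions in _get_base_config_repos above render to ordinary INI sections, which is why the assertions can parse them back with configparser. The same module can reproduce the shape (its writer emits "key = value" with spaces, so the module's own renderer may format slightly differently):

import configparser
import io

parser = configparser.ConfigParser()
parser['testing-foo'] = {
    'name': 'test-foo',
    'baseurl': 'http://foo',
    'enabled': '1',
    'autorefresh': '1',
}
buf = io.StringIO()
parser.write(buf)
print(buf.getvalue())  # a [testing-foo] section with the four keys above
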
-import cloudinit -from cloudinit.config.schema import ( - CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file, - get_schema_doc, get_schema, validate_cloudconfig_file, - validate_cloudconfig_schema, main) -from cloudinit.util import write_file - -from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema - -from copy import copy -import itertools -import pytest -from pathlib import Path -from textwrap import dedent -from yaml import safe_load - - -class GetSchemaTest(CiTestCase): - - def test_get_schema_coalesces_known_schema(self): - """Every cloudconfig module with schema is listed in allOf keyword.""" - schema = get_schema() - self.assertCountEqual( - [ - 'cc_apk_configure', - 'cc_apt_configure', - 'cc_bootcmd', - 'cc_locale', - 'cc_ntp', - 'cc_resizefs', - 'cc_runcmd', - 'cc_snap', - 'cc_ubuntu_advantage', - 'cc_ubuntu_drivers', - 'cc_write_files', - 'cc_write_files_deferred', - 'cc_zypper_add_repo', - 'cc_chef', - 'cc_install_hotplug', - ], - [subschema['id'] for subschema in schema['allOf']]) - self.assertEqual('cloud-config-schema', schema['id']) - self.assertEqual( - 'http://json-schema.org/draft-04/schema#', - schema['$schema']) - # FULL_SCHEMA is updated by the get_schema call - from cloudinit.config.schema import FULL_SCHEMA - self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys()) - - def test_get_schema_returns_global_when_set(self): - """When FULL_SCHEMA global is already set, get_schema returns it.""" - m_schema_path = 'cloudinit.config.schema.FULL_SCHEMA' - with mock.patch(m_schema_path, {'here': 'iam'}): - self.assertEqual({'here': 'iam'}, get_schema()) - - -class SchemaValidationErrorTest(CiTestCase): - """Test validate_cloudconfig_schema""" - - def test_schema_validation_error_expects_schema_errors(self): - """SchemaValidationError is initialized from schema_errors.""" - errors = (('key.path', 'unexpected key "junk"'), - ('key2.path', '"-123" is not a valid "hostname" format')) - exception = SchemaValidationError(schema_errors=errors) - self.assertIsInstance(exception, Exception) - self.assertEqual(exception.schema_errors, errors) - self.assertEqual( - 'Cloud config schema errors: key.path: unexpected key "junk", ' - 'key2.path: "-123" is not a valid "hostname" format', - str(exception)) - self.assertTrue(isinstance(exception, ValueError)) - - -class ValidateCloudConfigSchemaTest(CiTestCase): - """Tests for validate_cloudconfig_schema.""" - - with_logs = True - - @skipUnlessJsonSchema() - def test_validateconfig_schema_non_strict_emits_warnings(self): - """When strict is False validate_cloudconfig_schema emits warnings.""" - schema = {'properties': {'p1': {'type': 'string'}}} - validate_cloudconfig_schema({'p1': -1}, schema, strict=False) - self.assertIn( - "Invalid config:\np1: -1 is not of type 'string'\n", - self.logs.getvalue()) - - @skipUnlessJsonSchema() - def test_validateconfig_schema_emits_warning_on_missing_jsonschema(self): - """Warning from validate_cloudconfig_schema when missing jsonschema.""" - schema = {'properties': {'p1': {'type': 'string'}}} - with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}): - validate_cloudconfig_schema({'p1': -1}, schema, strict=True) - self.assertIn( - 'Ignoring schema validation. 
python-jsonschema is not present', - self.logs.getvalue()) - - @skipUnlessJsonSchema() - def test_validateconfig_schema_strict_raises_errors(self): - """When strict is True validate_cloudconfig_schema raises errors.""" - schema = {'properties': {'p1': {'type': 'string'}}} - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_schema({'p1': -1}, schema, strict=True) - self.assertEqual( - "Cloud config schema errors: p1: -1 is not of type 'string'", - str(context_mgr.exception)) - - @skipUnlessJsonSchema() - def test_validateconfig_schema_honors_formats(self): - """With strict True, validate_cloudconfig_schema errors on format.""" - schema = { - 'properties': {'p1': {'type': 'string', 'format': 'email'}}} - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_schema({'p1': '-1'}, schema, strict=True) - self.assertEqual( - "Cloud config schema errors: p1: '-1' is not a 'email'", - str(context_mgr.exception)) - - -class TestCloudConfigExamples: - schema = get_schema() - params = [ - (schema["id"], example) - for schema in schema["allOf"] for example in schema["examples"]] - - @pytest.mark.parametrize("schema_id,example", params) - @skipUnlessJsonSchema() - def test_validateconfig_schema_of_example(self, schema_id, example): - """ For a given example in a config module we test if it is valid - according to the unified schema of all config modules - """ - config_load = safe_load(example) - validate_cloudconfig_schema( - config_load, self.schema, strict=True) - - -class ValidateCloudConfigFileTest(CiTestCase): - """Tests for validate_cloudconfig_file.""" - - def setUp(self): - super(ValidateCloudConfigFileTest, self).setUp() - self.config_file = self.tmp_path('cloudcfg.yaml') - - def test_validateconfig_file_error_on_absent_file(self): - """On absent config_path, validate_cloudconfig_file errors.""" - with self.assertRaises(RuntimeError) as context_mgr: - validate_cloudconfig_file('/not/here', {}) - self.assertEqual( - 'Configfile /not/here does not exist', - str(context_mgr.exception)) - - def test_validateconfig_file_error_on_invalid_header(self): - """On invalid header, validate_cloudconfig_file errors. - - A SchemaValidationError is raised when the file doesn't begin with - CLOUD_CONFIG_HEADER. - """ - write_file(self.config_file, '#junk') - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_file(self.config_file, {}) - self.assertEqual( - 'Cloud config schema errors: format-l1.c1: File {0} needs to begin' - ' with "{1}"'.format( - self.config_file, CLOUD_CONFIG_HEADER.decode()), - str(context_mgr.exception)) - - def test_validateconfig_file_error_on_non_yaml_scanner_error(self): - """On non-yaml scan issues, validate_cloudconfig_file errors.""" - # Generate a scanner error by providing text on a single line with - # improper indent. 
- write_file(self.config_file, '#cloud-config\nasdf:\nasdf') - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_file(self.config_file, {}) - self.assertIn( - 'schema errors: format-l3.c1: File {0} is not valid yaml.'.format( - self.config_file), - str(context_mgr.exception)) - - def test_validateconfig_file_error_on_non_yaml_parser_error(self): - """On non-yaml parser issues, validate_cloudconfig_file errors.""" - write_file(self.config_file, '#cloud-config\n{}}') - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_file(self.config_file, {}) - self.assertIn( - 'schema errors: format-l2.c3: File {0} is not valid yaml.'.format( - self.config_file), - str(context_mgr.exception)) - - @skipUnlessJsonSchema() - def test_validateconfig_file_sctrictly_validates_schema(self): - """validate_cloudconfig_file raises errors on invalid schema.""" - schema = { - 'properties': {'p1': {'type': 'string', 'format': 'string'}}} - write_file(self.config_file, '#cloud-config\np1: -1') - with self.assertRaises(SchemaValidationError) as context_mgr: - validate_cloudconfig_file(self.config_file, schema) - self.assertEqual( - "Cloud config schema errors: p1: -1 is not of type 'string'", - str(context_mgr.exception)) - - -class GetSchemaDocTest(CiTestCase): - """Tests for get_schema_doc.""" - - def setUp(self): - super(GetSchemaDocTest, self).setUp() - self.required_schema = { - 'title': 'title', 'description': 'description', 'id': 'id', - 'name': 'name', 'frequency': 'frequency', - 'distros': ['debian', 'rhel']} - - def test_get_schema_doc_returns_restructured_text(self): - """get_schema_doc returns restructured text for a cloudinit schema.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': 'array', 'description': 'prop-description', - 'items': {'type': 'integer'}}}}) - self.assertEqual( - dedent(""" - name - ---- - **Summary:** title - - description - - **Internal name:** ``id`` - - **Module frequency:** frequency - - **Supported distros:** debian, rhel - - **Config schema**: - **prop1:** (array of integer) prop-description\n\n"""), - get_schema_doc(full_schema)) - - def test_get_schema_doc_handles_multiple_types(self): - """get_schema_doc delimits multiple property types with a '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': ['string', 'integer'], - 'description': 'prop-description'}}}) - self.assertIn( - '**prop1:** (string/integer) prop-description', - get_schema_doc(full_schema)) - - def test_get_schema_doc_handles_enum_types(self): - """get_schema_doc converts enum types to yaml and delimits with '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'enum': [True, False, 'stuff'], - 'description': 'prop-description'}}}) - self.assertIn( - '**prop1:** (true/false/stuff) prop-description', - get_schema_doc(full_schema)) - - def test_get_schema_doc_handles_nested_oneof_property_types(self): - """get_schema_doc describes array items oneOf declarations in type.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': 'array', - 'items': { - 'oneOf': [{'type': 'string'}, - {'type': 'integer'}]}, - 'description': 'prop-description'}}}) - self.assertIn( - '**prop1:** (array of (string)/(integer)) prop-description', - get_schema_doc(full_schema)) - - def test_get_schema_doc_handles_string_examples(self): - """get_schema_doc properly 
indented examples as a list of strings.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'examples': ['ex1:\n [don\'t, expand, "this"]', 'ex2: true'], - 'properties': { - 'prop1': {'type': 'array', 'description': 'prop-description', - 'items': {'type': 'integer'}}}}) - self.assertIn( - dedent(""" - **Config schema**: - **prop1:** (array of integer) prop-description - - **Examples**:: - - ex1: - [don't, expand, "this"] - # --- Example2 --- - ex2: true - """), - get_schema_doc(full_schema)) - - def test_get_schema_doc_properly_parse_description(self): - """get_schema_doc description properly formatted""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'p1': { - 'type': 'string', - 'description': dedent("""\ - This item - has the - following options: - - - option1 - - option2 - - option3 - - The default value is - option1""") - } - }} - ) - - self.assertIn( - dedent(""" - **Config schema**: - **p1:** (string) This item has the following options: - - - option1 - - option2 - - option3 - - The default value is option1 - """), - get_schema_doc(full_schema)) - - def test_get_schema_doc_raises_key_errors(self): - """get_schema_doc raises KeyErrors on missing keys.""" - for key in self.required_schema: - invalid_schema = copy(self.required_schema) - invalid_schema.pop(key) - with self.assertRaises(KeyError) as context_mgr: - get_schema_doc(invalid_schema) - self.assertIn(key, str(context_mgr.exception)) - - -class AnnotatedCloudconfigFileTest(CiTestCase): - maxDiff = None - - def test_annotated_cloudconfig_file_no_schema_errors(self): - """With no schema_errors, print the original content.""" - content = b'ntp:\n pools: [ntp1.pools.com]\n' - self.assertEqual( - content, - annotated_cloudconfig_file({}, content, schema_errors=[])) - - def test_annotated_cloudconfig_file_schema_annotates_and_adds_footer(self): - """With schema_errors, error lines are annotated and a footer added.""" - content = dedent("""\ - #cloud-config - # comment - ntp: - pools: [-99, 75] - """).encode() - expected = dedent("""\ - #cloud-config - # comment - ntp: # E1 - pools: [-99, 75] # E2,E3 - - # Errors: ------------- - # E1: Some type error - # E2: -99 is not a string - # E3: 75 is not a string - - """) - parsed_config = safe_load(content[13:]) - schema_errors = [ - ('ntp', 'Some type error'), ('ntp.pools.0', '-99 is not a string'), - ('ntp.pools.1', '75 is not a string')] - self.assertEqual( - expected, - annotated_cloudconfig_file(parsed_config, content, schema_errors)) - - def test_annotated_cloudconfig_file_annotates_separate_line_items(self): - """Errors are annotated for lists with items on separate lines.""" - content = dedent("""\ - #cloud-config - # comment - ntp: - pools: - - -99 - - 75 - """).encode() - expected = dedent("""\ - ntp: - pools: - - -99 # E1 - - 75 # E2 - """) - parsed_config = safe_load(content[13:]) - schema_errors = [ - ('ntp.pools.0', '-99 is not a string'), - ('ntp.pools.1', '75 is not a string')] - self.assertIn( - expected, - annotated_cloudconfig_file(parsed_config, content, schema_errors)) - - -class TestMain: - - exclusive_combinations = itertools.combinations( - ["--system", "--docs all", "--config-file something"], 2 - ) - - @pytest.mark.parametrize("params", exclusive_combinations) - def test_main_exclusive_args(self, params, capsys): - """Main exits non-zero and error on required exclusive args.""" - params = list(itertools.chain(*[a.split() for a in params])) - with mock.patch('sys.argv', ['mycmd'] + params): - with 
pytest.raises(SystemExit) as context_manager: - main() - assert 1 == context_manager.value.code - - _out, err = capsys.readouterr() - expected = ( - 'Expected one of --config-file, --system or --docs arguments\n' - ) - assert expected == err - - def test_main_missing_args(self, capsys): - """Main exits non-zero and reports an error on missing parameters.""" - with mock.patch('sys.argv', ['mycmd']): - with pytest.raises(SystemExit) as context_manager: - main() - assert 1 == context_manager.value.code - - _out, err = capsys.readouterr() - expected = ( - 'Expected one of --config-file, --system or --docs arguments\n' - ) - assert expected == err - - def test_main_absent_config_file(self, capsys): - """Main exits non-zero when config file is absent.""" - myargs = ['mycmd', '--annotate', '--config-file', 'NOT_A_FILE'] - with mock.patch('sys.argv', myargs): - with pytest.raises(SystemExit) as context_manager: - main() - assert 1 == context_manager.value.code - _out, err = capsys.readouterr() - assert 'Configfile NOT_A_FILE does not exist\n' == err - - def test_main_prints_docs(self, capsys): - """When --docs parameter is provided, main generates documentation.""" - myargs = ['mycmd', '--docs', 'all'] - with mock.patch('sys.argv', myargs): - assert 0 == main(), 'Expected 0 exit code' - out, _err = capsys.readouterr() - assert '\nNTP\n---\n' in out - assert '\nRuncmd\n------\n' in out - - def test_main_validates_config_file(self, tmpdir, capsys): - """When --config-file parameter is provided, main validates schema.""" - myyaml = tmpdir.join('my.yaml') - myargs = ['mycmd', '--config-file', myyaml.strpath] - myyaml.write(b'#cloud-config\nntp:') # shortest ntp schema - with mock.patch('sys.argv', myargs): - assert 0 == main(), 'Expected 0 exit code' - out, _err = capsys.readouterr() - assert 'Valid cloud-config: {0}\n'.format(myyaml) == out - - @mock.patch('cloudinit.config.schema.read_cfg_paths') - @mock.patch('cloudinit.config.schema.os.getuid', return_value=0) - def test_main_validates_system_userdata( - self, m_getuid, m_read_cfg_paths, capsys, paths - ): - """When --system is provided, main validates system userdata.""" - m_read_cfg_paths.return_value = paths - ud_file = paths.get_ipath_cur("userdata_raw") - write_file(ud_file, b'#cloud-config\nntp:') - myargs = ['mycmd', '--system'] - with mock.patch('sys.argv', myargs): - assert 0 == main(), 'Expected 0 exit code' - out, _err = capsys.readouterr() - assert 'Valid cloud-config: system userdata\n' == out - - @mock.patch('cloudinit.config.schema.os.getuid', return_value=1000) - def test_main_system_userdata_requires_root(self, m_getuid, capsys, paths): - """Non-root user can't use --system param""" - myargs = ['mycmd', '--system'] - with mock.patch('sys.argv', myargs): - with pytest.raises(SystemExit) as context_manager: - main() - assert 1 == context_manager.value.code - _out, err = capsys.readouterr() - expected = ( - 'Unable to read system userdata as non-root user. 
Try using sudo\n' - ) - assert expected == err - - -def _get_schema_doc_examples(): - examples_dir = Path( - cloudinit.__file__).parent.parent / 'doc' / 'examples' - assert examples_dir.is_dir() - - all_text_files = (f for f in examples_dir.glob('cloud-config*.txt') - if not f.name.startswith('cloud-config-archive')) - return all_text_files - - -class TestSchemaDocExamples: - schema = get_schema() - - @pytest.mark.parametrize("example_path", _get_schema_doc_examples()) - @skipUnlessJsonSchema() - def test_schema_doc_examples(self, example_path): - validate_cloudconfig_file(str(example_path), self.schema) - -# vi: ts=4 expandtab syntax=python diff --git a/tests/unittests/test_helpers.py b/tests/unittests/test_helpers.py index 2e4582a0..c6f9b94a 100644 --- a/tests/unittests/test_helpers.py +++ b/tests/unittests/test_helpers.py @@ -4,7 +4,7 @@ import os -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers from cloudinit import sources diff --git a/tests/unittests/test_log.py b/tests/unittests/test_log.py index e069a487..3d1b9582 100644 --- a/tests/unittests/test_log.py +++ b/tests/unittests/test_log.py @@ -9,7 +9,7 @@ import time from cloudinit import log as ci_logging from cloudinit.analyze.dump import CLOUD_INIT_ASCTIME_FMT -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase class TestCloudInitLogger(CiTestCase): diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 10871bcf..48ab6602 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -1,6 +1,6 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.tests import helpers +from tests.unittests import helpers from cloudinit.handlers import cloud_config from cloudinit.handlers import (CONTENT_START, CONTENT_END) diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 57edc89a..b5c38c55 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -12,7 +12,7 @@ from cloudinit import subp from cloudinit import util from cloudinit import safeyaml as yaml -from cloudinit.tests.helpers import ( +from tests.unittests.helpers import ( CiTestCase, FilesystemMockingTestCase, dir2dict, mock, populate_dir) import base64 diff --git a/tests/unittests/test_net_freebsd.py b/tests/unittests/test_net_freebsd.py index e339e132..f0dde097 100644 --- a/tests/unittests/test_net_freebsd.py +++ b/tests/unittests/test_net_freebsd.py @@ -3,7 +3,7 @@ import os import cloudinit.net import cloudinit.net.network_state from cloudinit import safeyaml -from cloudinit.tests.helpers import (CiTestCase, mock, readResource, dir2dict) +from tests.unittests.helpers import (CiTestCase, mock, readResource, dir2dict) SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") diff --git a/tests/unittests/test_netinfo.py b/tests/unittests/test_netinfo.py new file mode 100644 index 00000000..238f7b0a --- /dev/null +++ b/tests/unittests/test_netinfo.py @@ -0,0 +1,181 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
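
The TestMain cases above all drive the schema CLI the same way: patch sys.argv and call main() directly. The same pattern works for ad-hoc validation outside the test suite (the file name here is illustrative):

from unittest import mock

from cloudinit.config.schema import main

with mock.patch('sys.argv', ['mycmd', '--config-file', 'my.yaml']):
    rc = main()  # returns 0 when my.yaml parses as valid #cloud-config
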
+ +"""Tests netinfo module functions and classes.""" + +from copy import copy + +from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat +from tests.unittests.helpers import CiTestCase, mock, readResource + + +# Example ifconfig and route output +SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output") +SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output") +SAMPLE_FREEBSD_IFCONFIG_OUT = readResource("netinfo/freebsd-ifconfig-output") +SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output") +SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4") +SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6") +SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4") +SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6") +NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output") +ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output") +FREEBSD_NETDEV_OUT = readResource("netinfo/freebsd-netdev-formatted-output") + + +class TestNetInfo(CiTestCase): + + maxDiff = None + with_logs = True + + @mock.patch('cloudinit.netinfo.subp.which') + @mock.patch('cloudinit.netinfo.subp.subp') + def test_netdev_old_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering old nettools info.""" + m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + self.assertEqual(NETDEV_FORMATTED_OUT, content) + + @mock.patch('cloudinit.netinfo.subp.which') + @mock.patch('cloudinit.netinfo.subp.subp') + def test_netdev_new_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + self.assertEqual(NETDEV_FORMATTED_OUT, content) + + @mock.patch('cloudinit.netinfo.subp.which') + @mock.patch('cloudinit.netinfo.subp.subp') + def test_netdev_freebsd_nettools_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering netdev new nettools info.""" + m_subp.return_value = (SAMPLE_FREEBSD_IFCONFIG_OUT, '') + m_which.side_effect = lambda x: x if x == 'ifconfig' else None + content = netdev_pformat() + print() + print(content) + print() + self.assertEqual(FREEBSD_NETDEV_OUT, content) + + @mock.patch('cloudinit.netinfo.subp.which') + @mock.patch('cloudinit.netinfo.subp.subp') + def test_netdev_iproute_pformat(self, m_subp, m_which): + """netdev_pformat properly rendering ip route info.""" + m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '') + m_which.side_effect = lambda x: x if x == 'ip' else None + content = netdev_pformat() + new_output = copy(NETDEV_FORMATTED_OUT) + # ip route show describes global scopes on ipv4 addresses + # whereas ifconfig does not. Add proper global/host scope to output. + new_output = new_output.replace('| . | 50:7b', '| global | 50:7b') + new_output = new_output.replace( + '255.0.0.0 | . 
|', '255.0.0.0 | host |')
+        self.assertEqual(new_output, content)
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_netdev_warn_on_missing_commands(self, m_subp, m_which):
+        """netdev_pformat warns when missing both ip and 'netstat'."""
+        m_which.return_value = None  # Neither ip nor netstat found
+        content = netdev_pformat()
+        self.assertEqual('\n', content)
+        self.assertEqual(
+            "WARNING: Could not print networks: missing 'ip' and 'ifconfig'"
+            " commands\n",
+            self.logs.getvalue())
+        m_subp.assert_not_called()
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_netdev_info_nettools_down(self, m_subp, m_which):
+        """test netdev_info using nettools and down interfaces."""
+        m_subp.return_value = (
+            readResource("netinfo/new-ifconfig-output-down"), "")
+        m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+        self.assertEqual(
+            {'eth0': {'ipv4': [], 'ipv6': [],
+                      'hwaddr': '00:16:3e:de:51:a6', 'up': False},
+             'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}],
+                    'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+                    'hwaddr': '.', 'up': True}},
+            netdev_info("."))
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_netdev_info_iproute_down(self, m_subp, m_which):
+        """Test netdev_info with ip and down interfaces."""
+        m_subp.return_value = (
+            readResource("netinfo/sample-ipaddrshow-output-down"), "")
+        m_which.side_effect = lambda x: x if x == 'ip' else None
+        self.assertEqual(
+            {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.',
+                              'mask': '255.0.0.0', 'scope': 'host'}],
+                    'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+                    'hwaddr': '.', 'up': True},
+             'eth0': {'ipv4': [], 'ipv6': [],
+                      'hwaddr': '00:16:3e:de:51:a6', 'up': False}},
+            netdev_info("."))
+
+    @mock.patch('cloudinit.netinfo.netdev_info')
+    def test_netdev_pformat_with_down(self, m_netdev_info):
+        """test netdev_pformat when netdev_info returns 'down' interfaces."""
+        m_netdev_info.return_value = (
+            {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0',
+                              'scope': 'host'}],
+                    'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+                    'hwaddr': '.', 'up': True},
+             'eth0': {'ipv4': [], 'ipv6': [],
+                      'hwaddr': '00:16:3e:de:51:a6', 'up': False}})
+        self.assertEqual(
+            readResource("netinfo/netdev-formatted-output-down"),
+            netdev_pformat())
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_route_nettools_pformat(self, m_subp, m_which):
+        """route_pformat properly rendering nettools route info."""
+
+        def subp_netstat_route_selector(*args, **kwargs):
+            if args[0] == ['netstat', '--route', '--numeric', '--extend']:
+                return (SAMPLE_ROUTE_OUT_V4, '')
+            if args[0] == ['netstat', '-A', 'inet6', '--route', '--numeric']:
+                return (SAMPLE_ROUTE_OUT_V6, '')
+            raise Exception('Unexpected subp call %s' % args[0])
+
+        m_subp.side_effect = subp_netstat_route_selector
+        m_which.side_effect = lambda x: x if x == 'netstat' else None
+        content = route_pformat()
+        self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_route_iproute_pformat(self, m_subp, m_which):
+        """route_pformat properly rendering ip route info."""
+
+        def subp_iproute_selector(*args, **kwargs):
+            if ['ip', '-o', 'route', 'list'] == args[0]:
+                return (SAMPLE_IPROUTE_OUT_V4, '')
+            v6cmd = ['ip', '--oneline', '-6', 'route', 'list', 'table', 'all']
+            if v6cmd == args[0]:
+                
return (SAMPLE_IPROUTE_OUT_V6, '')
+            raise Exception('Unexpected subp call %s' % args[0])
+
+        m_subp.side_effect = subp_iproute_selector
+        m_which.side_effect = lambda x: x if x == 'ip' else None
+        content = route_pformat()
+        self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+    @mock.patch('cloudinit.netinfo.subp.which')
+    @mock.patch('cloudinit.netinfo.subp.subp')
+    def test_route_warn_on_missing_commands(self, m_subp, m_which):
+        """route_pformat warns when missing both ip and 'netstat'."""
+        m_which.return_value = None  # Neither ip nor netstat found
+        content = route_pformat()
+        self.assertEqual('\n', content)
+        self.assertEqual(
+            "WARNING: Could not print routes: missing 'ip' and 'netstat'"
+            " commands\n",
+            self.logs.getvalue())
+        m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py
index abbb29b8..4e737ad7 100644
--- a/tests/unittests/test_pathprefix2dict.py
+++ b/tests/unittests/test_pathprefix2dict.py
@@ -2,7 +2,7 @@
 
 from cloudinit import util
 
-from cloudinit.tests.helpers import TestCase, populate_dir
+from tests.unittests.helpers import TestCase, populate_dir
 
 import shutil
 import tempfile
diff --git a/tests/unittests/test_persistence.py b/tests/unittests/test_persistence.py
new file mode 100644
index 00000000..ec1152a9
--- /dev/null
+++ b/tests/unittests/test_persistence.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2020 Canonical Ltd.
+#
+# Author: Daniel Watkins
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+"""
+Tests for cloudinit.persistence.
+
+Per https://docs.python.org/3/library/pickle.html, only "classes that are
+defined at the top level of a module" can be pickled. This means that all of
+our ``CloudInitPickleMixin`` subclasses for testing must be defined at
+module-level (rather than being defined inline or dynamically in the body of
+test methods, as we would do without this constraint).
+
+``TestPickleMixin.test_subclasses`` iterates over a list of all of these
+classes, and tests that they round-trip through a pickle dump/load. As the
+interface we're testing is that ``_unpickle`` is called appropriately on
+subclasses, our subclasses define their assertions in their ``_unpickle``
+implementation. (This means that the assertions will not be executed if
+``_unpickle`` is not called at all; we have
+``TestPickleMixin.test_unpickle_called`` to ensure it is called.)
+
+To avoid manually maintaining a list of classes for parametrization we use a
+simple metaclass, ``_Collector``, to gather them up.
+"""
+
+import pickle
+from unittest import mock
+
+import pytest
+
+from cloudinit.persistence import CloudInitPickleMixin
+
+
+class _Collector(type):
+    """Any class using this as a metaclass will be stored in test_classes."""
+
+    test_classes = []
+
+    def __new__(cls, *args):
+        new_cls = super().__new__(cls, *args)
+        _Collector.test_classes.append(new_cls)
+        return new_cls
+
+
+class InstanceVersionNotUsed(CloudInitPickleMixin, metaclass=_Collector):
+    """Test that the class version is used over one set in instance state."""
+
+    _ci_pkl_version = 1
+
+    def __init__(self):
+        self._ci_pkl_version = 2
+
+    def _unpickle(self, ci_pkl_version: int) -> None:
+        assert 1 == ci_pkl_version
+
+
+class MissingVersionHandled(CloudInitPickleMixin, metaclass=_Collector):
+    """Test that pickles without ``_ci_pkl_version`` are handled gracefully. 
+ + This is tested by overriding ``__getstate__`` so the dumped pickle of this + class will not have ``_ci_pkl_version`` included. + """ + + def __getstate__(self): + return self.__dict__ + + def _unpickle(self, ci_pkl_version: int) -> None: + assert 0 == ci_pkl_version + + +class OverridenVersionHonored(CloudInitPickleMixin, metaclass=_Collector): + """Test that the subclass's version is used.""" + + _ci_pkl_version = 1 + + def _unpickle(self, ci_pkl_version: int) -> None: + assert 1 == ci_pkl_version + + +class StateIsRestored(CloudInitPickleMixin, metaclass=_Collector): + """Instance state should be restored before ``_unpickle`` is called.""" + + def __init__(self): + self.some_state = "some state" + + def _unpickle(self, ci_pkl_version: int) -> None: + assert "some state" == self.some_state + + +class UnpickleCanBeUnoverriden(CloudInitPickleMixin, metaclass=_Collector): + """Subclasses should not need to override ``_unpickle``.""" + + +class VersionDefaultsToZero(CloudInitPickleMixin, metaclass=_Collector): + """Test that the default version is 0.""" + + def _unpickle(self, ci_pkl_version: int) -> None: + assert 0 == ci_pkl_version + + +class VersionIsPoppedFromState(CloudInitPickleMixin, metaclass=_Collector): + """Test _ci_pkl_version is popped from state before being restored.""" + + def _unpickle(self, ci_pkl_version: int) -> None: + # `self._ci_pkl_version` returns the type's _ci_pkl_version if it isn't + # in instance state, so we need to explicitly check self.__dict__. + assert "_ci_pkl_version" not in self.__dict__ + + +class TestPickleMixin: + def test_unpickle_called(self): + """Test that self._unpickle is called on unpickle.""" + with mock.patch.object( + CloudInitPickleMixin, "_unpickle" + ) as m_unpickle: + pickle.loads(pickle.dumps(CloudInitPickleMixin())) + assert 1 == m_unpickle.call_count + + @pytest.mark.parametrize("cls", _Collector.test_classes) + def test_subclasses(self, cls): + """For each collected class, round-trip through pickle dump/load. + + Assertions are implemented in ``cls._unpickle``, and so are evoked as + part of the pickle load. 
+ """ + pickle.loads(pickle.dumps(cls())) diff --git a/tests/unittests/test_registry.py b/tests/unittests/test_registry.py index 2b625026..4c7df186 100644 --- a/tests/unittests/test_registry.py +++ b/tests/unittests/test_registry.py @@ -2,7 +2,7 @@ from cloudinit.registry import DictRegistry -from cloudinit.tests.helpers import (mock, TestCase) +from tests.unittests.helpers import (mock, TestCase) class TestDictRegistry(TestCase): diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index b78a6939..3aaeea43 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -8,7 +8,7 @@ from cloudinit import reporting from cloudinit.reporting import events from cloudinit.reporting import handlers -from cloudinit.tests.helpers import TestCase +from tests.unittests.helpers import TestCase def _fake_registry(): diff --git a/tests/unittests/test_reporting_hyperv.py b/tests/unittests/test_reporting_hyperv.py index 9324b78d..24a1dcc7 100644 --- a/tests/unittests/test_reporting_hyperv.py +++ b/tests/unittests/test_reporting_hyperv.py @@ -13,7 +13,7 @@ import re from unittest import mock from cloudinit import util -from cloudinit.tests.helpers import CiTestCase +from tests.unittests.helpers import CiTestCase from cloudinit.sources.helpers import azure diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py deleted file mode 100644 index 53d3cd5a..00000000 --- a/tests/unittests/test_rh_subscription.py +++ /dev/null @@ -1,234 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -"""Tests for registering RHEL subscription via rh_subscription.""" - -import copy -import logging - -from cloudinit.config import cc_rh_subscription -from cloudinit import subp - -from cloudinit.tests.helpers import CiTestCase, mock - -SUBMGR = cc_rh_subscription.SubscriptionManager -SUB_MAN_CLI = 'cloudinit.config.cc_rh_subscription._sub_man_cli' - - -@mock.patch(SUB_MAN_CLI) -class GoodTests(CiTestCase): - with_logs = True - - def setUp(self): - super(GoodTests, self).setUp() - self.name = "cc_rh_subscription" - self.cloud_init = None - self.log = logging.getLogger("good_tests") - self.args = [] - self.handle = cc_rh_subscription.handle - - self.config = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks' - }} - self.config_full = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'auto-attach': True, - 'service-level': 'self-support', - 'add-pool': ['pool1', 'pool2', 'pool3'], - 'enable-repo': ['repo1', 'repo2', 'repo3'], - 'disable-repo': ['repo4', 'repo5'] - }} - - def test_already_registered(self, m_sman_cli): - ''' - Emulates a system that is already registered. 
Ensure it gets - a non-ProcessExecution error from is_registered() - ''' - self.handle(self.name, self.config, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 1) - self.assertIn('System is already registered', self.logs.getvalue()) - - def test_simple_registration(self, m_sman_cli): - ''' - Simple registration with username and password - ''' - reg = "The system has been registered with ID:" \ - " 12345678-abde-abcde-1234-1234567890abc" - m_sman_cli.side_effect = [subp.ProcessExecutionError, (reg, 'bar')] - self.handle(self.name, self.config, self.cloud_init, - self.log, self.args) - self.assertIn(mock.call(['identity']), m_sman_cli.call_args_list) - self.assertIn(mock.call(['register', '--username=scooby@do.com', - '--password=scooby-snacks'], - logstring_val=True), - m_sman_cli.call_args_list) - self.assertIn('rh_subscription plugin completed successfully', - self.logs.getvalue()) - self.assertEqual(m_sman_cli.call_count, 2) - - @mock.patch.object(cc_rh_subscription.SubscriptionManager, "_getRepos") - def test_update_repos_disable_with_none(self, m_get_repos, m_sman_cli): - cfg = copy.deepcopy(self.config) - m_get_repos.return_value = ([], ['repo1']) - cfg['rh_subscription'].update( - {'enable-repo': ['repo1'], 'disable-repo': None}) - mysm = cc_rh_subscription.SubscriptionManager(cfg) - self.assertEqual(True, mysm.update_repos()) - m_get_repos.assert_called_with() - self.assertEqual(m_sman_cli.call_args_list, - [mock.call(['repos', '--enable=repo1'])]) - - def test_full_registration(self, m_sman_cli): - ''' - Registration with auto-attach, service-level, adding pools, - and enabling and disabling yum repos - ''' - call_lists = [] - call_lists.append(['attach', '--pool=pool1', '--pool=pool3']) - call_lists.append(['repos', '--disable=repo5', '--enable=repo2', - '--enable=repo3']) - call_lists.append(['attach', '--auto', '--servicelevel=self-support']) - reg = "The system has been registered with ID:" \ - " 12345678-abde-abcde-1234-1234567890abc" - m_sman_cli.side_effect = [ - subp.ProcessExecutionError, - (reg, 'bar'), - ('Service level set to: self-support', ''), - ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''), - ('Repo ID: repo1\nRepo ID: repo5\n', ''), - ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: repo4', ''), - ('', '')] - self.handle(self.name, self.config_full, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 9) - for call in call_lists: - self.assertIn(mock.call(call), m_sman_cli.call_args_list) - self.assertIn("rh_subscription plugin completed successfully", - self.logs.getvalue()) - - -@mock.patch(SUB_MAN_CLI) -class TestBadInput(CiTestCase): - with_logs = True - name = "cc_rh_subscription" - cloud_init = None - log = logging.getLogger("bad_tests") - args = [] - SM = cc_rh_subscription.SubscriptionManager - reg = "The system has been registered with ID:" \ - " 12345678-abde-abcde-1234-1234567890abc" - - config_no_password = {'rh_subscription': - {'username': 'scooby@do.com' - }} - - config_no_key = {'rh_subscription': - {'activation-key': '1234abcde', - }} - - config_service = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'service-level': 'self-support' - }} - - config_badpool = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'add-pool': 'not_a_list' - }} - config_badrepo = {'rh_subscription': - {'username': 'scooby@do.com', - 'password': 'scooby-snacks', - 'enable-repo': 'not_a_list' - }} - config_badkey = {'rh_subscription': - 
{'activation-key': 'abcdef1234', - 'fookey': 'bar', - 'org': '123', - }} - - def setUp(self): - super(TestBadInput, self).setUp() - self.handle = cc_rh_subscription.handle - - def assert_logged_warnings(self, warnings): - logs = self.logs.getvalue() - missing = [w for w in warnings if "WARNING: " + w not in logs] - self.assertEqual([], missing, "Missing expected warnings.") - - def test_no_password(self, m_sman_cli): - '''Attempt to register without the password key/value.''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_no_password, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 0) - - def test_no_org(self, m_sman_cli): - '''Attempt to register without the org key/value.''' - m_sman_cli.side_effect = [subp.ProcessExecutionError] - self.handle(self.name, self.config_no_key, self.cloud_init, - self.log, self.args) - m_sman_cli.assert_called_with(['identity']) - self.assertEqual(m_sman_cli.call_count, 1) - self.assert_logged_warnings(( - 'Unable to register system due to incomplete information.', - 'Use either activationkey and org *or* userid and password', - 'Registration failed or did not run completely', - 'rh_subscription plugin did not complete successfully')) - - def test_service_level_without_auto(self, m_sman_cli): - '''Attempt to register using service-level without auto-attach key.''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_service, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 1) - self.assert_logged_warnings(( - 'The service-level key must be used in conjunction with ', - 'rh_subscription plugin did not complete successfully')) - - def test_pool_not_a_list(self, m_sman_cli): - ''' - Register with pools that are not in the format of a list - ''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_badpool, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 2) - self.assert_logged_warnings(( - 'Pools must in the format of a list', - 'rh_subscription plugin did not complete successfully')) - - def test_repo_not_a_list(self, m_sman_cli): - ''' - Register with repos that are not in the format of a list - ''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_badrepo, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 2) - self.assert_logged_warnings(( - 'Repo IDs must in the format of a list.', - 'Unable to add or remove repos', - 'rh_subscription plugin did not complete successfully')) - - def test_bad_key_value(self, m_sman_cli): - ''' - Attempt to register with a key that we don't know - ''' - m_sman_cli.side_effect = [subp.ProcessExecutionError, - (self.reg, 'bar')] - self.handle(self.name, self.config_badkey, self.cloud_init, - self.log, self.args) - self.assertEqual(m_sman_cli.call_count, 1) - self.assert_logged_warnings(( - 'fookey is not a valid key for rh_subscription. 
Valid keys are:', - 'rh_subscription plugin did not complete successfully')) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/test_runs/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py deleted file mode 100644 index ff27a280..00000000 --- a/tests/unittests/test_runs/test_merge_run.py +++ /dev/null @@ -1,60 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. - -import os -import shutil -import tempfile - -from cloudinit.tests import helpers - -from cloudinit.settings import PER_INSTANCE -from cloudinit import safeyaml -from cloudinit import stages -from cloudinit import util - - -class TestMergeRun(helpers.FilesystemMockingTestCase): - def _patchIn(self, root): - self.patchOS(root) - self.patchUtils(root) - - def test_none_ds(self): - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self.replicateTestRoot('simple_ubuntu', new_root) - cfg = { - 'datasource_list': ['None'], - 'cloud_init_modules': ['write-files'], - 'system_info': {'paths': {'run_dir': new_root}} - } - ud = helpers.readResource('user_data.1.txt') - cloud_cfg = safeyaml.dumps(cfg) - util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) - util.write_file(os.path.join(new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) - self._patchIn(new_root) - - # Now start verifying whats created - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.datasource.userdata_raw = ud - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', - initer.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) - mirrors = initer.distro.get_option('package_mirrors') - self.assertEqual(1, len(mirrors)) - mirror = mirrors[0] - self.assertEqual(mirror['arches'], ['i386', 'amd64', 'blah']) - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertTrue(os.path.exists('/etc/blah.ini')) - self.assertIn('write-files', which_ran) - contents = util.load_file('/etc/blah.ini') - self.assertEqual(contents, 'blah') - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py deleted file mode 100644 index cb3aae60..00000000 --- a/tests/unittests/test_runs/test_simple_run.py +++ /dev/null @@ -1,182 +0,0 @@ -# This file is part of cloud-init. See LICENSE file for license information. 
- -import copy -import os - - -from cloudinit.settings import PER_INSTANCE -from cloudinit import safeyaml -from cloudinit import stages -from cloudinit.tests import helpers -from cloudinit import util - - -class TestSimpleRun(helpers.FilesystemMockingTestCase): - - with_logs = True - - def setUp(self): - super(TestSimpleRun, self).setUp() - self.new_root = self.tmp_dir() - self.replicateTestRoot('simple_ubuntu', self.new_root) - - # Seed cloud.cfg file for our tests - self.cfg = { - 'datasource_list': ['None'], - 'runcmd': ['ls /etc'], # test ALL_DISTROS - 'spacewalk': {}, # test non-ubuntu distros module definition - 'system_info': {'paths': {'run_dir': self.new_root}}, - 'write_files': [ - { - 'path': '/etc/blah.ini', - 'content': 'blah', - 'permissions': 0o755, - }, - ], - 'cloud_init_modules': ['write-files', 'spacewalk', 'runcmd'], - } - cloud_cfg = safeyaml.dumps(self.cfg) - util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - util.write_file(os.path.join(self.new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) - self.patchOS(self.new_root) - self.patchUtils(self.new_root) - - def test_none_ds_populates_var_lib_cloud(self): - """Init and run_section default behavior creates appropriate dirs.""" - # Now start verifying whats created - initer = stages.Init() - initer.read_cfg() - initer.initialize() - self.assertTrue(os.path.exists("/var/lib/cloud")) - for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']: - self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d))) - - initer.fetch() - iid = initer.instancify() - self.assertEqual(iid, 'iid-datasource-none') - initer.update() - self.assertTrue(os.path.islink("var/lib/cloud/instance")) - - def test_none_ds_runs_modules_which_do_not_define_distros(self): - """Any modules which do not define a distros attribute are run.""" - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertTrue(os.path.exists('/etc/blah.ini')) - self.assertIn('write-files', which_ran) - contents = util.load_file('/etc/blah.ini') - self.assertEqual(contents, 'blah') - self.assertNotIn( - "Skipping modules ['write-files'] because they are not verified on" - " distro 'ubuntu'", - self.logs.getvalue()) - - def test_none_ds_skips_modules_which_define_unmatched_distros(self): - """Skip modules which define distros which don't match the current.""" - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertIn( - "Skipping modules 'spacewalk' because they are not verified on" - " distro 'ubuntu'", - self.logs.getvalue()) - self.assertNotIn('spacewalk', which_ran) - - def test_none_ds_runs_modules_which_distros_all(self): - """Skip modules which define distros attribute as supporting 'all'. - - This is done in the module with the declaration: - distros = [ALL_DISTROS]. runcmd is an example. 
- """ - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertIn('runcmd', which_ran) - self.assertNotIn( - "Skipping modules 'runcmd' because they are not verified on" - " distro 'ubuntu'", - self.logs.getvalue()) - - def test_none_ds_forces_run_via_unverified_modules(self): - """run_section forced skipped modules by using unverified_modules.""" - - # re-write cloud.cfg with unverified_modules override - cfg = copy.deepcopy(self.cfg) - cfg['unverified_modules'] = ['spacewalk'] # Would have skipped - cloud_cfg = safeyaml.dumps(cfg) - util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - util.write_file(os.path.join(self.new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) - - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertIn('spacewalk', which_ran) - self.assertIn( - "running unverified_modules: 'spacewalk'", - self.logs.getvalue()) - - def test_none_ds_run_with_no_config_modules(self): - """run_section will report no modules run when none are configured.""" - - # re-write cloud.cfg with unverified_modules override - cfg = copy.deepcopy(self.cfg) - # Represent empty configuration in /etc/cloud/cloud.cfg - cfg['cloud_init_modules'] = None - cloud_cfg = safeyaml.dumps(cfg) - util.ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) - util.write_file(os.path.join(self.new_root, 'etc', - 'cloud', 'cloud.cfg'), cloud_cfg) - - initer = stages.Init() - initer.read_cfg() - initer.initialize() - initer.fetch() - initer.instancify() - initer.update() - initer.cloudify().run('consume_data', initer.consume_data, - args=[PER_INSTANCE], freq=PER_INSTANCE) - - mods = stages.Modules(initer) - (which_ran, failures) = mods.run_section('cloud_init_modules') - self.assertTrue(len(failures) == 0) - self.assertEqual([], which_ran) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_simpletable.py b/tests/unittests/test_simpletable.py new file mode 100644 index 00000000..69b30f0e --- /dev/null +++ b/tests/unittests/test_simpletable.py @@ -0,0 +1,106 @@ +# Copyright (C) 2017 Amazon.com, Inc. or its affiliates +# +# Author: Andrew Jorgensen +# +# This file is part of cloud-init. See LICENSE file for license information. +"""Tests that SimpleTable works just like PrettyTable for cloud-init. + +Not all possible PrettyTable cases are tested because we're not trying to +reimplement the entire library, only the minimal parts we actually use. 
+""" + +from cloudinit.simpletable import SimpleTable +from tests.unittests.helpers import CiTestCase + +# Examples rendered by cloud-init using PrettyTable +NET_DEVICE_FIELDS = ( + 'Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address') +NET_DEVICE_ROWS = ( + ('ens3', True, '172.31.4.203', '255.255.240.0', '.', '0a:1f:07:15:98:70'), + ('ens3', True, 'fe80::81f:7ff:fe15:9870/64', '.', 'link', + '0a:1f:07:15:98:70'), + ('lo', True, '127.0.0.1', '255.0.0.0', '.', '.'), + ('lo', True, '::1/128', '.', 'host', '.'), +) +NET_DEVICE_TABLE = """\ ++--------+------+----------------------------+---------------+-------+-------------------+ +| Device | Up | Address | Mask | Scope | Hw-Address | ++--------+------+----------------------------+---------------+-------+-------------------+ +| ens3 | True | 172.31.4.203 | 255.255.240.0 | . | 0a:1f:07:15:98:70 | +| ens3 | True | fe80::81f:7ff:fe15:9870/64 | . | link | 0a:1f:07:15:98:70 | +| lo | True | 127.0.0.1 | 255.0.0.0 | . | . | +| lo | True | ::1/128 | . | host | . | ++--------+------+----------------------------+---------------+-------+-------------------+""" # noqa: E501 +ROUTE_IPV4_FIELDS = ( + 'Route', 'Destination', 'Gateway', 'Genmask', 'Interface', 'Flags') +ROUTE_IPV4_ROWS = ( + ('0', '0.0.0.0', '172.31.0.1', '0.0.0.0', 'ens3', 'UG'), + ('1', '169.254.0.0', '0.0.0.0', '255.255.0.0', 'ens3', 'U'), + ('2', '172.31.0.0', '0.0.0.0', '255.255.240.0', 'ens3', 'U'), +) +ROUTE_IPV4_TABLE = """\ ++-------+-------------+------------+---------------+-----------+-------+ +| Route | Destination | Gateway | Genmask | Interface | Flags | ++-------+-------------+------------+---------------+-----------+-------+ +| 0 | 0.0.0.0 | 172.31.0.1 | 0.0.0.0 | ens3 | UG | +| 1 | 169.254.0.0 | 0.0.0.0 | 255.255.0.0 | ens3 | U | +| 2 | 172.31.0.0 | 0.0.0.0 | 255.255.240.0 | ens3 | U | ++-------+-------------+------------+---------------+-----------+-------+""" + +AUTHORIZED_KEYS_FIELDS = ( + 'Keytype', 'Fingerprint (md5)', 'Options', 'Comment') +AUTHORIZED_KEYS_ROWS = ( + ('ssh-rsa', '24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36', '-', + 'ajorgens'), +) +AUTHORIZED_KEYS_TABLE = """\ ++---------+-------------------------------------------------+---------+----------+ +| Keytype | Fingerprint (md5) | Options | Comment | ++---------+-------------------------------------------------+---------+----------+ +| ssh-rsa | 24:c7:41:49:47:12:31:a0:de:6f:62:79:9b:13:06:36 | - | ajorgens | ++---------+-------------------------------------------------+---------+----------+""" # noqa: E501 + +# from prettytable import PrettyTable +# pt = PrettyTable(('HEADER',)) +# print(pt) +NO_ROWS_FIELDS = ('HEADER',) +NO_ROWS_TABLE = """\ ++--------+ +| HEADER | ++--------+ ++--------+""" + + +class TestSimpleTable(CiTestCase): + + def test_no_rows(self): + """An empty table is rendered as PrettyTable would have done it.""" + table = SimpleTable(NO_ROWS_FIELDS) + self.assertEqual(str(table), NO_ROWS_TABLE) + + def test_net_dev(self): + """Net device info is rendered as it was with PrettyTable.""" + table = SimpleTable(NET_DEVICE_FIELDS) + for row in NET_DEVICE_ROWS: + table.add_row(row) + self.assertEqual(str(table), NET_DEVICE_TABLE) + + def test_route_ipv4(self): + """Route IPv4 info is rendered as it was with PrettyTable.""" + table = SimpleTable(ROUTE_IPV4_FIELDS) + for row in ROUTE_IPV4_ROWS: + table.add_row(row) + self.assertEqual(str(table), ROUTE_IPV4_TABLE) + + def test_authorized_keys(self): + """SSH authorized keys are rendered as they were with PrettyTable.""" + table = 
SimpleTable(AUTHORIZED_KEYS_FIELDS)
+        for row in AUTHORIZED_KEYS_ROWS:
+            table.add_row(row)
+        self.assertEqual(str(table), AUTHORIZED_KEYS_TABLE)
+
+    def test_get_string(self):
+        """get_string() method returns the same content as str()."""
+        table = SimpleTable(AUTHORIZED_KEYS_FIELDS)
+        for row in AUTHORIZED_KEYS_ROWS:
+            table.add_row(row)
+        self.assertEqual(table.get_string(), str(table))
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 08e20050..b210bd3b 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -7,7 +7,7 @@ from functools import partial
 from unittest.mock import patch
 
 from cloudinit import ssh_util
-from cloudinit.tests import helpers as test_helpers
+from tests.unittests import helpers as test_helpers
 from cloudinit import util
 
 # https://stackoverflow.com/questions/11351032/
diff --git a/tests/unittests/test_stages.py b/tests/unittests/test_stages.py
new file mode 100644
index 00000000..a722f03f
--- /dev/null
+++ b/tests/unittests/test_stages.py
@@ -0,0 +1,478 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests related to cloudinit.stages module."""
+import os
+import stat
+
+import pytest
+
+from cloudinit import stages
+from cloudinit import sources
+from cloudinit.sources import NetworkConfigSource
+
+from cloudinit.event import EventScope, EventType
+from cloudinit.util import write_file
+
+from tests.unittests.helpers import CiTestCase, mock
+
+TEST_INSTANCE_ID = 'i-testing'
+
+
+class FakeDataSource(sources.DataSource):
+
+    def __init__(self, paths=None, userdata=None, vendordata=None,
+                 network_config=''):
+        super(FakeDataSource, self).__init__({}, None, paths=paths)
+        self.metadata = {'instance-id': TEST_INSTANCE_ID}
+        self.userdata_raw = userdata
+        self.vendordata_raw = vendordata
+        self._network_config = None
+        if network_config:  # Permit for None value to setup attribute
+            self._network_config = network_config
+
+    @property
+    def network_config(self):
+        return self._network_config
+
+    def _get_data(self):
+        return True
+
+
+class TestInit(CiTestCase):
+    with_logs = True
+    allowed_subp = False
+
+    def setUp(self):
+        super(TestInit, self).setUp()
+        self.tmpdir = self.tmp_dir()
+        self.init = stages.Init()
+        # Setup fake Paths for Init to reference
+        self.init._cfg = {'system_info': {
+            'distro': 'ubuntu', 'paths': {'cloud_dir': self.tmpdir,
+                                          'run_dir': self.tmpdir}}}
+        self.init.datasource = FakeDataSource(paths=self.init.paths)
+        self._real_is_new_instance = self.init.is_new_instance
+        self.init.is_new_instance = mock.Mock(return_value=True)
+
+    def test_wb__find_networking_config_disabled(self):
+        """find_networking_config returns no config when disabled."""
+        disable_file = os.path.join(
+            self.init.paths.get_cpath('data'), 'upgraded-network')
+        write_file(disable_file, '')
+        self.assertEqual(
+            (None, disable_file),
+            self.init._find_networking_config())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_disabled_by_kernel(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns when disabled by kernel cmdline."""
+        m_cmdline.return_value = {'config': 'disabled'}
+        m_initramfs.return_value = {'config': ['fake_initrd']}
+        self.assertEqual(
+            (None, NetworkConfigSource.cmdline),
+            self.init._find_networking_config())
+        self.assertEqual('DEBUG: network config disabled by cmdline\n',
+                         self.logs.getvalue())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_disabled_by_initrd(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns when disabled by initramfs config."""
+        m_cmdline.return_value = {}
+        m_initramfs.return_value = {'config': 'disabled'}
+        self.assertEqual(
+            (None, NetworkConfigSource.initramfs),
+            self.init._find_networking_config())
+        self.assertEqual('DEBUG: network config disabled by initramfs\n',
+                         self.logs.getvalue())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_disabled_by_datasrc(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns when disabled by datasource cfg."""
+        m_cmdline.return_value = {}  # Kernel doesn't disable networking
+        m_initramfs.return_value = {}  # initramfs doesn't disable networking
+        self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+                          'network': {}}  # system config doesn't disable
+
+        self.init.datasource = FakeDataSource(
+            network_config={'config': 'disabled'})
+        self.assertEqual(
+            (None, NetworkConfigSource.ds),
+            self.init._find_networking_config())
+        self.assertEqual('DEBUG: network config disabled by ds\n',
+                         self.logs.getvalue())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_disabled_by_sysconfig(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns when disabled by system config."""
+        m_cmdline.return_value = {}  # Kernel doesn't disable networking
+        m_initramfs.return_value = {}  # initramfs doesn't disable networking
+        self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+                          'network': {'config': 'disabled'}}
+        self.assertEqual(
+            (None, NetworkConfigSource.system_cfg),
+            self.init._find_networking_config())
+        self.assertEqual('DEBUG: network config disabled by system_cfg\n',
+                         self.logs.getvalue())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test__find_networking_config_uses_datasrc_order(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config should check sources in DS defined order"""
+        # cmdline and initramfs, which would normally be preferred over other
+        # sources, disable networking; in this case, though, the DS moves them
+        # later so its own config is preferred
+        m_cmdline.return_value = {'config': 'disabled'}
+        m_initramfs.return_value = {'config': 'disabled'}
+
+        ds_net_cfg = {'config': {'needle': True}}
+        self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+        self.init.datasource.network_config_sources = [
+            NetworkConfigSource.ds, NetworkConfigSource.system_cfg,
+            NetworkConfigSource.cmdline, NetworkConfigSource.initramfs]
+
+        self.assertEqual(
+            (ds_net_cfg, NetworkConfigSource.ds),
+            self.init._find_networking_config())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test__find_networking_config_warns_if_datasrc_uses_invalid_src(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config warns when the DS names an invalid source."""
+        ds_net_cfg = {'config': {'needle': True}}
+        self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+        self.init.datasource.network_config_sources = [
+            'invalid_src', NetworkConfigSource.ds]
+
+        self.assertEqual(
+            (ds_net_cfg, NetworkConfigSource.ds),
+            self.init._find_networking_config())
+        self.assertIn('WARNING: data source specifies an invalid network'
+                      ' cfg_source: invalid_src',
+                      self.logs.getvalue())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test__find_networking_config_warns_if_datasrc_uses_unavailable_src(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config warns when the DS names an unavailable
+        source."""
+        ds_net_cfg = {'config': {'needle': True}}
+        self.init.datasource = FakeDataSource(network_config=ds_net_cfg)
+        self.init.datasource.network_config_sources = [
+            NetworkConfigSource.fallback, NetworkConfigSource.ds]
+
+        self.assertEqual(
+            (ds_net_cfg, NetworkConfigSource.ds),
+            self.init._find_networking_config())
+        self.assertIn('WARNING: data source specifies an unavailable network'
+                      ' cfg_source: fallback',
+                      self.logs.getvalue())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_returns_kernel(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns kernel cmdline config if present."""
+        expected_cfg = {'config': ['fakekernel']}
+        m_cmdline.return_value = expected_cfg
+        m_initramfs.return_value = {'config': ['fake_initrd']}
+        self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+                          'network': {'config': ['fakesys_config']}}
+        self.init.datasource = FakeDataSource(
+            network_config={'config': ['fakedatasource']})
+        self.assertEqual(
+            (expected_cfg, NetworkConfigSource.cmdline),
+            self.init._find_networking_config())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_returns_initramfs(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns initramfs config if present."""
+        expected_cfg = {'config': ['fake_initrd']}
+        m_cmdline.return_value = {}
+        m_initramfs.return_value = expected_cfg
+        self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+                          'network': {'config': ['fakesys_config']}}
+        self.init.datasource = FakeDataSource(
+            network_config={'config': ['fakedatasource']})
+        self.assertEqual(
+            (expected_cfg, NetworkConfigSource.initramfs),
+            self.init._find_networking_config())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_returns_system_cfg(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns system config when present."""
+        m_cmdline.return_value = {}  # No kernel network config
+        m_initramfs.return_value = {}  # no initramfs network config
+        expected_cfg = {'config': ['fakesys_config']}
+        self.init._cfg = {'system_info': {'paths': {'cloud_dir': self.tmpdir}},
+                          'network': expected_cfg}
+        self.init.datasource = FakeDataSource(
+            network_config={'config': ['fakedatasource']})
+        self.assertEqual(
+            (expected_cfg, NetworkConfigSource.system_cfg),
+            self.init._find_networking_config())
+
+    @mock.patch('cloudinit.stages.cmdline.read_initramfs_config')
+    @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config')
+    def test_wb__find_networking_config_returns_datasrc_cfg(
+            self, m_cmdline, m_initramfs):
+        """find_networking_config returns datasource net config if present."""
+        m_cmdline.return_value = {}  # No
kernel network config + m_initramfs.return_value = {} # no initramfs network config + # No system config for network in setUp + expected_cfg = {'config': ['fakedatasource']} + self.init.datasource = FakeDataSource(network_config=expected_cfg) + self.assertEqual( + (expected_cfg, NetworkConfigSource.ds), + self.init._find_networking_config()) + + @mock.patch('cloudinit.stages.cmdline.read_initramfs_config') + @mock.patch('cloudinit.stages.cmdline.read_kernel_cmdline_config') + def test_wb__find_networking_config_returns_fallback( + self, m_cmdline, m_initramfs): + """find_networking_config returns fallback config if not defined.""" + m_cmdline.return_value = {} # Kernel doesn't disable networking + m_initramfs.return_value = {} # no initramfs network config + # Neither datasource nor system_info disable or provide network + + fake_cfg = {'config': [{'type': 'physical', 'name': 'eth9'}], + 'version': 1} + + def fake_generate_fallback(): + return fake_cfg + + # Monkey patch distro which gets cached on self.init + distro = self.init.distro + distro.generate_fallback_config = fake_generate_fallback + self.assertEqual( + (fake_cfg, NetworkConfigSource.fallback), + self.init._find_networking_config()) + self.assertNotIn('network config disabled', self.logs.getvalue()) + + def test_apply_network_config_disabled(self): + """Log when network is disabled by upgraded-network.""" + disable_file = os.path.join( + self.init.paths.get_cpath('data'), 'upgraded-network') + + def fake_network_config(): + return (None, disable_file) + + self.init._find_networking_config = fake_network_config + + self.init.apply_network_config(True) + self.assertIn( + 'INFO: network config is disabled by %s' % disable_file, + self.logs.getvalue()) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + def test_apply_network_on_new_instance(self, m_ubuntu, m_macs): + """Call distro apply_network_config methods on is_new_instance.""" + net_cfg = { + 'version': 1, 'config': [ + {'subnets': [{'type': 'dhcp'}], 'type': 'physical', + 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} + + def fake_network_config(): + return net_cfg, NetworkConfigSource.fallback + + m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} + + self.init._find_networking_config = fake_network_config + + self.init.apply_network_config(True) + self.init.distro.apply_network_config_names.assert_called_with(net_cfg) + self.init.distro.apply_network_config.assert_called_with( + net_cfg, bring_up=True) + + @mock.patch('cloudinit.distros.ubuntu.Distro') + def test_apply_network_on_same_instance_id(self, m_ubuntu): + """Only call distro.apply_network_config_names on same instance id.""" + self.init.is_new_instance = self._real_is_new_instance + old_instance_id = os.path.join( + self.init.paths.get_cpath('data'), 'instance-id') + write_file(old_instance_id, TEST_INSTANCE_ID) + net_cfg = { + 'version': 1, 'config': [ + {'subnets': [{'type': 'dhcp'}], 'type': 'physical', + 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} + + def fake_network_config(): + return net_cfg, NetworkConfigSource.fallback + + self.init._find_networking_config = fake_network_config + + self.init.apply_network_config(True) + self.init.distro.apply_network_config_names.assert_called_with(net_cfg) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. 
Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + # CiTestCase doesn't work with pytest.mark.parametrize, and moving this + # functionality to a separate class is more cumbersome than it'd be worth + # at the moment, so use this as a simple setup + def _apply_network_setup(self, m_macs): + old_instance_id = os.path.join( + self.init.paths.get_cpath('data'), 'instance-id') + write_file(old_instance_id, TEST_INSTANCE_ID) + net_cfg = { + 'version': 1, 'config': [ + {'subnets': [{'type': 'dhcp'}], 'type': 'physical', + 'name': 'eth9', 'mac_address': '42:42:42:42:42:42'}]} + + def fake_network_config(): + return net_cfg, NetworkConfigSource.fallback + + m_macs.return_value = {'42:42:42:42:42:42': 'eth9'} + + self.init._find_networking_config = fake_network_config + self.init.datasource = FakeDataSource(paths=self.init.paths) + self.init.is_new_instance = mock.Mock(return_value=False) + return net_cfg + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE, EventType.BOOT}}) + def test_apply_network_allowed_when_default_boot( + self, m_ubuntu, m_macs + ): + """Apply network if datasource permits BOOT event.""" + net_cfg = self._apply_network_setup(m_macs) + + self.init.apply_network_config(True) + assert mock.call( + net_cfg + ) == self.init.distro.apply_network_config_names.call_args_list[-1] + assert mock.call( + net_cfg, bring_up=True + ) == self.init.distro.apply_network_config.call_args_list[-1] + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_disabled_when_no_default_boot( + self, m_ubuntu, m_macs + ): + """Don't apply network if datasource has no BOOT event.""" + self._apply_network_setup(m_macs) + self.init.apply_network_config(True) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.default_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_allowed_with_userdata_overrides( + self, m_ubuntu, m_macs + ): + """Apply network if userdata overrides default config""" + net_cfg = self._apply_network_setup(m_macs) + self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init.apply_network_config(True) + self.init.distro.apply_network_config_names.assert_called_with( + net_cfg) + self.init.distro.apply_network_config.assert_called_with( + net_cfg, bring_up=True) + + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.distros.ubuntu.Distro') + @mock.patch.dict(sources.DataSource.supported_update_events, { + EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}}) + def test_apply_network_disabled_when_unsupported( + self, m_ubuntu, m_macs + ): + """Don't apply network config if unsupported. 
+ + Shouldn't work even when specified as userdata + """ + self._apply_network_setup(m_macs) + + self.init._cfg = {'updates': {'network': {'when': ['boot']}}} + self.init.apply_network_config(True) + self.init.distro.apply_network_config.assert_not_called() + assert ( + "No network config applied. Neither a new instance nor datasource " + "network update allowed" + ) in self.logs.getvalue() + + +class TestInit_InitializeFilesystem: + """Tests for cloudinit.stages.Init._initialize_filesystem. + + TODO: Expand these tests to cover all of _initialize_filesystem's behavior. + """ + + @pytest.yield_fixture + def init(self, paths): + """A fixture which yields a stages.Init instance with paths and cfg set + + As it is replaced with a mock, consumers of this fixture can set + `init._cfg` if the default empty dict configuration is not appropriate. + """ + with mock.patch("cloudinit.stages.util.ensure_dirs"): + init = stages.Init() + init._cfg = {} + init._paths = paths + yield init + + @mock.patch("cloudinit.stages.util.ensure_file") + def test_ensure_file_not_called_if_no_log_file_configured( + self, m_ensure_file, init + ): + """If no log file is configured, we should not ensure its existence.""" + init._cfg = {} + + init._initialize_filesystem() + + assert 0 == m_ensure_file.call_count + + def test_log_files_existence_is_ensured_if_configured(self, init, tmpdir): + """If a log file is configured, we should ensure its existence.""" + log_file = tmpdir.join("cloud-init.log") + init._cfg = {"def_log_file": str(log_file)} + + init._initialize_filesystem() + + assert log_file.exists() + # Assert we create it 0o640 by default if it doesn't already exist + assert 0o640 == stat.S_IMODE(log_file.stat().mode) + + def test_existing_file_permissions_are_not_modified(self, init, tmpdir): + """If the log file already exists, we should not modify its permissions + + See https://bugs.launchpad.net/cloud-init/+bug/1900837. + """ + # Use a mode that will never be made the default so this test will + # always be valid + mode = 0o606 + log_file = tmpdir.join("cloud-init.log") + log_file.ensure() + log_file.chmod(mode) + init._cfg = {"def_log_file": str(log_file)} + + init._initialize_filesystem() + + assert mode == stat.S_IMODE(log_file.stat().mode) + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_subp.py b/tests/unittests/test_subp.py new file mode 100644 index 00000000..ec513d01 --- /dev/null +++ b/tests/unittests/test_subp.py @@ -0,0 +1,286 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests for cloudinit.subp utility functions""" + +import json +import os +import sys +import stat + +from unittest import mock + +from cloudinit import subp, util +from tests.unittests.helpers import CiTestCase + + +BASH = subp.which('bash') +BOGUS_COMMAND = 'this-is-not-expected-to-be-a-program-name' + + +class TestPrependBaseCommands(CiTestCase): + + with_logs = True + + def test_prepend_base_command_errors_on_neither_string_nor_list(self): + """Raise an error for each command which is not a string or list.""" + orig_commands = ['ls', 1, {'not': 'gonna work'}, ['basecmd', 'list']] + with self.assertRaises(TypeError) as context_manager: + subp.prepend_base_command( + base_command='basecmd', commands=orig_commands) + self.assertEqual( + "Invalid basecmd config. 
These commands are not a string or" + " list:\n1\n{'not': 'gonna work'}", + str(context_manager.exception)) + + def test_prepend_base_command_warns_on_non_base_string_commands(self): + """Warn on each non-base for commands of type string.""" + orig_commands = [ + 'ls', 'basecmd list', 'touch /blah', 'basecmd install x'] + fixed_commands = subp.prepend_base_command( + base_command='basecmd', commands=orig_commands) + self.assertEqual( + 'WARNING: Non-basecmd commands in basecmd config:\n' + 'ls\ntouch /blah\n', + self.logs.getvalue()) + self.assertEqual(orig_commands, fixed_commands) + + def test_prepend_base_command_prepends_on_non_base_list_commands(self): + """Prepend 'basecmd' for each non-basecmd command of type list.""" + orig_commands = [['ls'], ['basecmd', 'list'], ['basecmda', '/blah'], + ['basecmd', 'install', 'x']] + expected = [['basecmd', 'ls'], ['basecmd', 'list'], + ['basecmd', 'basecmda', '/blah'], + ['basecmd', 'install', 'x']] + fixed_commands = subp.prepend_base_command( + base_command='basecmd', commands=orig_commands) + self.assertEqual('', self.logs.getvalue()) + self.assertEqual(expected, fixed_commands) + + def test_prepend_base_command_removes_first_item_when_none(self): + """Remove the first element of a non-basecmd when it is None.""" + orig_commands = [[None, 'ls'], ['basecmd', 'list'], + [None, 'touch', '/blah'], + ['basecmd', 'install', 'x']] + expected = [['ls'], ['basecmd', 'list'], + ['touch', '/blah'], + ['basecmd', 'install', 'x']] + fixed_commands = subp.prepend_base_command( + base_command='basecmd', commands=orig_commands) + self.assertEqual('', self.logs.getvalue()) + self.assertEqual(expected, fixed_commands) + + +class TestSubp(CiTestCase): + allowed_subp = [BASH, 'cat', CiTestCase.SUBP_SHELL_TRUE, + BOGUS_COMMAND, sys.executable] + + stdin2err = [BASH, '-c', 'cat >&2'] + stdin2out = ['cat'] + utf8_invalid = b'ab\xaadef' + utf8_valid = b'start \xc3\xa9 end' + utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' + printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] + + def printf_cmd(self, *args): + # bash's printf supports \xaa. So does /usr/bin/printf + # but by using bash, we remove dependency on another program. + return([BASH, '-c', 'printf "$@"', 'printf'] + list(args)) + + def test_subp_handles_bytestrings(self): + """subp can run a bytestring command if shell is True.""" + tmp_file = self.tmp_path('test.out') + cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) + (out, _err) = subp.subp(cmd.encode('utf-8'), shell=True) + self.assertEqual('', out) + self.assertEqual('', _err) + self.assertEqual('HI MOM\n', util.load_file(tmp_file)) + + def test_subp_handles_strings(self): + """subp can run a string command if shell is True.""" + tmp_file = self.tmp_path('test.out') + cmd = 'echo HI MOM >> {tmp_file}'.format(tmp_file=tmp_file) + (out, _err) = subp.subp(cmd, shell=True) + self.assertEqual('', out) + self.assertEqual('', _err) + self.assertEqual('HI MOM\n', util.load_file(tmp_file)) + + def test_subp_handles_utf8(self): + # The given bytes contain utf-8 accented characters as seen in e.g. + # the "deja dup" package in Ubuntu. 
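+        # A rough sketch of the behaviour this test leans on (assuming
+        # subp.subp()'s defaults of capture=True and decode='replace'):
+        # captured output comes back as decoded str, e.g.
+        #   out, _err = subp.subp([BASH, '-c', 'printf hi'], capture=True)
+        #   assert out == 'hi' and isinstance(out, str)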
+ cmd = self.printf_cmd(self.utf8_valid_2) + (out, _err) = subp.subp(cmd, capture=True) + self.assertEqual(out, self.utf8_valid_2.decode('utf-8')) + + def test_subp_respects_decode_false(self): + (out, err) = subp.subp(self.stdin2out, capture=True, decode=False, + data=self.utf8_valid) + self.assertTrue(isinstance(out, bytes)) + self.assertTrue(isinstance(err, bytes)) + self.assertEqual(out, self.utf8_valid) + + def test_subp_decode_ignore(self): + # this executes a string that writes invalid utf-8 to stdout + (out, _err) = subp.subp(self.printf_cmd('abc\\xaadef'), + capture=True, decode='ignore') + self.assertEqual(out, 'abcdef') + + def test_subp_decode_strict_valid_utf8(self): + (out, _err) = subp.subp(self.stdin2out, capture=True, + decode='strict', data=self.utf8_valid) + self.assertEqual(out, self.utf8_valid.decode('utf-8')) + + def test_subp_decode_invalid_utf8_replaces(self): + (out, _err) = subp.subp(self.stdin2out, capture=True, + data=self.utf8_invalid) + expected = self.utf8_invalid.decode('utf-8', 'replace') + self.assertEqual(out, expected) + + def test_subp_decode_strict_raises(self): + args = [] + kwargs = {'args': self.stdin2out, 'capture': True, + 'decode': 'strict', 'data': self.utf8_invalid} + self.assertRaises(UnicodeDecodeError, subp.subp, *args, **kwargs) + + def test_subp_capture_stderr(self): + data = b'hello world' + (out, err) = subp.subp(self.stdin2err, capture=True, + decode=False, data=data, + update_env={'LC_ALL': 'C'}) + self.assertEqual(err, data) + self.assertEqual(out, b'') + + def test_subp_reads_env(self): + with mock.patch.dict("os.environ", values={'FOO': 'BAR'}): + out, _err = subp.subp(self.printenv + ['FOO'], capture=True) + self.assertEqual('FOO=BAR', out.splitlines()[0]) + + def test_subp_env_and_update_env(self): + out, _err = subp.subp( + self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, + env={'FOO': 'BAR'}, + update_env={'HOME': '/myhome', 'K2': 'V2'}) + self.assertEqual( + ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines()) + + def test_subp_update_env(self): + extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'} + with mock.patch.dict("os.environ", values=extra): + out, _err = subp.subp( + self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, + update_env={'HOME': '/myhome', 'K2': 'V2'}) + + self.assertEqual( + ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines()) + + def test_subp_warn_missing_shebang(self): + """Warn on no #! in script""" + noshebang = self.tmp_path('noshebang') + util.write_file(noshebang, 'true\n') + + print("os is %s" % os) + os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) + with self.allow_subp([noshebang]): + self.assertRaisesRegex(subp.ProcessExecutionError, + r'Missing #! 
in script\?',
+                                   subp.subp, (noshebang,))
+
+    def test_subp_combined_stderr_stdout(self):
+        """Providing combine_capture as True redirects stderr to stdout."""
+        data = b'hello world'
+        (out, err) = subp.subp(self.stdin2err, capture=True,
+                               combine_capture=True, decode=False, data=data)
+        self.assertEqual(b'', err)
+        self.assertEqual(data, out)
+
+    def test_returns_none_if_no_capture(self):
+        (out, err) = subp.subp(self.stdin2out, data=b'', capture=False)
+        self.assertIsNone(err)
+        self.assertIsNone(out)
+
+    def test_exception_has_out_err_are_bytes_if_decode_false(self):
+        """Raised exc should have stderr, stdout as bytes if no decode."""
+        with self.assertRaises(subp.ProcessExecutionError) as cm:
+            subp.subp([BOGUS_COMMAND], decode=False)
+        self.assertTrue(isinstance(cm.exception.stdout, bytes))
+        self.assertTrue(isinstance(cm.exception.stderr, bytes))
+
+    def test_exception_has_out_err_are_str_if_decode_true(self):
+        """Raised exc should have stderr, stdout as str when decode is True."""
+        with self.assertRaises(subp.ProcessExecutionError) as cm:
+            subp.subp([BOGUS_COMMAND], decode=True)
+        self.assertTrue(isinstance(cm.exception.stdout, str))
+        self.assertTrue(isinstance(cm.exception.stderr, str))
+
+    def test_bunch_of_slashes_in_path(self):
+        self.assertEqual("/target/my/path/",
+                         subp.target_path("/target/", "//my/path/"))
+        self.assertEqual("/target/my/path/",
+                         subp.target_path("/target/", "///my/path/"))
+
+    def test_c_lang_can_take_utf8_args(self):
+        """Independent of system LC_CTYPE, args can contain utf-8 strings.
+
+        When python starts up, its default encoding gets set based on
+        the value of LC_CTYPE. If no system locale is set, the default
+        encoding for both python2 and python3 in some paths will end up
+        being ascii.
+
+        Attempts to use setlocale or patching (or changing) os.environ
+        in the current environment seem to not be effective.
+
+        This test starts up a python with LC_CTYPE set to C so that
+        the default encoding will be set to ascii. In such an environment
+        Popen(['command', 'non-ascii-arg']) would cause a UnicodeDecodeError.
+ """ + python_prog = '\n'.join([ + 'import json, sys', + 'from cloudinit.subp import subp', + 'data = sys.stdin.read()', + 'cmd = json.loads(data)', + 'subp(cmd, capture=False)', + '']) + cmd = [BASH, '-c', 'echo -n "$@"', '--', + self.utf8_valid.decode("utf-8")] + python_subp = [sys.executable, '-c', python_prog] + + out, _err = subp.subp( + python_subp, update_env={'LC_CTYPE': 'C'}, + data=json.dumps(cmd).encode("utf-8"), + decode=False) + self.assertEqual(self.utf8_valid, out) + + def test_bogus_command_logs_status_messages(self): + """status_cb gets status messages logs on bogus commands provided.""" + logs = [] + + def status_cb(log): + logs.append(log) + + with self.assertRaises(subp.ProcessExecutionError): + subp.subp([BOGUS_COMMAND], status_cb=status_cb) + + expected = [ + 'Begin run command: {cmd}\n'.format(cmd=BOGUS_COMMAND), + 'ERROR: End run command: invalid command provided\n'] + self.assertEqual(expected, logs) + + def test_command_logs_exit_codes_to_status_cb(self): + """status_cb gets status messages containing command exit code.""" + logs = [] + + def status_cb(log): + logs.append(log) + + with self.assertRaises(subp.ProcessExecutionError): + subp.subp([BASH, '-c', 'exit 2'], status_cb=status_cb) + subp.subp([BASH, '-c', 'exit 0'], status_cb=status_cb) + + expected = [ + 'Begin run command: %s -c exit 2\n' % BASH, + 'ERROR: End run command: exit(2)\n', + 'Begin run command: %s -c exit 0\n' % BASH, + 'End run command: exit(0)\n'] + self.assertEqual(expected, logs) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_temp_utils.py b/tests/unittests/test_temp_utils.py new file mode 100644 index 00000000..9d56d0d0 --- /dev/null +++ b/tests/unittests/test_temp_utils.py @@ -0,0 +1,117 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +"""Tests for cloudinit.temp_utils""" + +from cloudinit.temp_utils import mkdtemp, mkstemp, tempdir +from tests.unittests.helpers import CiTestCase, wrap_and_call +import os + + +class TestTempUtils(CiTestCase): + + def test_mkdtemp_default_non_root(self): + """mkdtemp creates a dir under /tmp for the unprivileged.""" + calls = [] + + def fake_mkdtemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 1000, + 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkdtemp) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/tmp'}], calls) + + def test_mkdtemp_default_non_root_needs_exe(self): + """mkdtemp creates a dir under /var/tmp/cloud-init when needs_exe.""" + calls = [] + + def fake_mkdtemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 1000, + 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkdtemp, needs_exe=True) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/var/tmp/cloud-init'}], calls) + + def test_mkdtemp_default_root(self): + """mkdtemp creates a dir under /run/cloud-init for the privileged.""" + calls = [] + + def fake_mkdtemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 0, + 'tempfile.mkdtemp': {'side_effect': fake_mkdtemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkdtemp) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) + + def test_mkstemp_default_non_root(self): + """mkstemp creates secure tempfile under /tmp for the unprivileged.""" + calls = [] + + def fake_mkstemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 1000, + 'tempfile.mkstemp': {'side_effect': fake_mkstemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkstemp) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/tmp'}], calls) + + def test_mkstemp_default_root(self): + """mkstemp creates a secure tempfile in /run/cloud-init for root.""" + calls = [] + + def fake_mkstemp(*args, **kwargs): + calls.append(kwargs) + return '/fake/return/path' + + retval = wrap_and_call( + 'cloudinit.temp_utils', + {'os.getuid': 0, + 'tempfile.mkstemp': {'side_effect': fake_mkstemp}, + '_TMPDIR': {'new': None}, + 'os.path.isdir': True}, + mkstemp) + self.assertEqual('/fake/return/path', retval) + self.assertEqual([{'dir': '/run/cloud-init/tmp'}], calls) + + def test_tempdir_error_suppression(self): + """test tempdir suppresses errors during directory removal.""" + + with self.assertRaises(OSError): + with tempdir(prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # As a result, the directory is already gone, + # so shutil.rmtree should raise OSError + + with tempdir(rmtree_ignore_errors=True, + prefix='cloud-init-dhcp-') as tdir: + os.rmdir(tdir) + # Since the directory is already gone, shutil.rmtree would raise + # OSError, but we suppress that + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index cba09830..459e017b 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -4,7 +4,7 @@ # # 
This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.tests import helpers as test_helpers +from tests.unittests import helpers as test_helpers import textwrap from cloudinit import templater diff --git a/tests/unittests/test_upgrade.py b/tests/unittests/test_upgrade.py new file mode 100644 index 00000000..d7a721a2 --- /dev/null +++ b/tests/unittests/test_upgrade.py @@ -0,0 +1,52 @@ +# Copyright (C) 2020 Canonical Ltd. +# +# Author: Daniel Watkins +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Upgrade testing for cloud-init. + +This module tests cloud-init's behaviour across upgrades. Specifically, it +specifies a set of invariants that the current codebase expects to be true (as +tests in ``TestUpgrade``) and then checks that these hold true after unpickling +``obj.pkl``s from previous versions of cloud-init; those pickles are stored in +``tests/data/old_pickles/``. +""" + +import operator +import pathlib + +import pytest + +from cloudinit.stages import _pkl_load +from tests.unittests.helpers import resourceLocation + + +class TestUpgrade: + @pytest.fixture( + params=pathlib.Path(resourceLocation("old_pickles")).glob("*.pkl"), + scope="class", + ids=operator.attrgetter("name"), + ) + def previous_obj_pkl(self, request): + """Load each pickle to memory once, then run all tests against it. + + Test implementations _must not_ modify the ``previous_obj_pkl`` which + they are passed, as that will affect tests that run after them. + """ + return _pkl_load(str(request.param)) + + def test_networking_set_on_distro(self, previous_obj_pkl): + """We always expect to have ``.networking`` on ``Distro`` objects.""" + assert previous_obj_pkl.distro.networking is not None + + def test_blacklist_drivers_set_on_networking(self, previous_obj_pkl): + """We always expect Networking.blacklist_drivers to be initialised.""" + assert previous_obj_pkl.distro.networking.blacklist_drivers is None + + def test_paths_has_run_dir_attribute(self, previous_obj_pkl): + assert previous_obj_pkl.paths.run_dir is not None + + def test_vendordata_exists(self, previous_obj_pkl): + assert previous_obj_pkl.vendordata2 is None + assert previous_obj_pkl.vendordata2_raw is None diff --git a/tests/unittests/test_url_helper.py b/tests/unittests/test_url_helper.py new file mode 100644 index 00000000..501d9533 --- /dev/null +++ b/tests/unittests/test_url_helper.py @@ -0,0 +1,178 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.url_helper import ( + NOT_FOUND, UrlError, REDACTED, oauth_headers, read_file_or_url, + retry_on_url_exc) +from tests.unittests.helpers import CiTestCase, mock, skipIf +from cloudinit import util +from cloudinit import version + +import httpretty +import logging +import requests + + +try: + import oauthlib + assert oauthlib # avoid pyflakes error F401: import unused + _missing_oauthlib_dep = False +except ImportError: + _missing_oauthlib_dep = True + + +M_PATH = 'cloudinit.url_helper.' 
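+
+# A minimal usage sketch for the helpers under test, mirroring the
+# assertions below rather than documenting the full API (URL illustrative):
+#
+#   resp = read_file_or_url('file:///etc/hostname')
+#   resp.contents            # raw bytes
+#   str(resp)                # contents decoded to text
+#   retry_on_url_exc(msg='', exc=UrlError(cause=None, code=NOT_FOUND))
+#   # -> True: callers treat a 404 as retryable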
+
+
+class TestOAuthHeaders(CiTestCase):
+
+    def test_oauth_headers_raises_not_implemented_when_oauthlib_missing(self):
+        """oauth_headers raises a NotImplementedError when oauthlib absent."""
+        with mock.patch.dict('sys.modules', {'oauthlib': None}):
+            with self.assertRaises(NotImplementedError) as context_manager:
+                oauth_headers(1, 2, 3, 4, 5)
+            self.assertEqual(
+                'oauth support is not available',
+                str(context_manager.exception))
+
+    @skipIf(_missing_oauthlib_dep, "No python-oauthlib dependency")
+    @mock.patch('oauthlib.oauth1.Client')
+    def test_oauth_headers_calls_oauthlib_client_when_available(self, m_client):
+        """oauth_headers calls oauth1.Client.sign with the provided url."""
+        class fakeclient(object):
+            def sign(self, url):
+                # The first and 3rd item of the client.sign tuple are ignored
+                return ('junk', url, 'junk2')
+
+        m_client.return_value = fakeclient()
+
+        return_value = oauth_headers(
+            'url', 'consumer_key', 'token_key', 'token_secret',
+            'consumer_secret')
+        self.assertEqual('url', return_value)
+
+
+class TestReadFileOrUrl(CiTestCase):
+
+    with_logs = True
+
+    def test_read_file_or_url_str_from_file(self):
+        """Test that str(result) on a file is the text version of contents.
+        It should not be "b'data'", but just "'data'"."""
+        tmpf = self.tmp_path("myfile1")
+        data = b'This is my file content\n'
+        util.write_file(tmpf, data, omode="wb")
+        result = read_file_or_url("file://%s" % tmpf)
+        self.assertEqual(result.contents, data)
+        self.assertEqual(str(result), data.decode('utf-8'))
+
+    @httpretty.activate
+    def test_read_file_or_url_str_from_url(self):
+        """Test that str(result) on a url is the text version of contents.
+        It should not be "b'data'", but just "'data'"."""
+        url = 'http://hostname/path'
+        data = b'This is my url content\n'
+        httpretty.register_uri(httpretty.GET, url, data)
+        result = read_file_or_url(url)
+        self.assertEqual(result.contents, data)
+        self.assertEqual(str(result), data.decode('utf-8'))
+
+    @httpretty.activate
+    def test_read_file_or_url_str_from_url_redacting_headers_from_logs(self):
+        """Headers are redacted from logs but unredacted in requests."""
+        url = 'http://hostname/path'
+        headers = {'sensitive': 'sekret', 'server': 'blah'}
+        httpretty.register_uri(httpretty.GET, url)
+        # By default, httpretty will log our request along with the header,
+        # so if we don't change this the secret will show up in the logs
+        logging.getLogger('httpretty.core').setLevel(logging.CRITICAL)
+
+        read_file_or_url(url, headers=headers, headers_redact=['sensitive'])
+        logs = self.logs.getvalue()
+        for k in headers.keys():
+            self.assertEqual(headers[k], httpretty.last_request().headers[k])
+        self.assertIn(REDACTED, logs)
+        self.assertNotIn('sekret', logs)
+
+    @httpretty.activate
+    def test_read_file_or_url_str_from_url_redacts_noheaders(self):
+        """When no headers_redact, header values are in logs and requests."""
+        url = 'http://hostname/path'
+        headers = {'sensitive': 'sekret', 'server': 'blah'}
+        httpretty.register_uri(httpretty.GET, url)
+
+        read_file_or_url(url, headers=headers)
+        for k in headers.keys():
+            self.assertEqual(headers[k], httpretty.last_request().headers[k])
+        logs = self.logs.getvalue()
+        self.assertNotIn(REDACTED, logs)
+        self.assertIn('sekret', logs)
+
+    @mock.patch(M_PATH + 'readurl')
+    def test_read_file_or_url_passes_params_to_readurl(self, m_readurl):
+        """read_file_or_url passes all params through to readurl."""
+        url = 'http://hostname/path'
+        response = 'This is my url content\n'
+        m_readurl.return_value = response
+        params = {'url': url, 'timeout': 1, 'retries': 2,
+                  'headers': {'somehdr': 'val'},
+                  'data': 'data', 'sec_between': 1,
+                  'ssl_details': {'cert_file': '/path/cert.pem'},
+                  'headers_cb': 'headers_cb', 'exception_cb': 'exception_cb'}
+        self.assertEqual(response, read_file_or_url(**params))
+        params.pop('url')  # url is passed in as a positional arg
+        self.assertEqual([mock.call(url, **params)], m_readurl.call_args_list)
+
+    def test_wb_read_url_defaults_honored_by_read_file_or_url_callers(self):
+        """Readurl param defaults used when unspecified by read_file_or_url.
+
+        Param defaults tested are as follows:
+            retries: 0, additional headers None beyond default, method: GET,
+            data: None, check_status: True and allow_redirects: True
+        """
+        url = 'http://hostname/path'
+
+        m_response = mock.MagicMock()
+
+        class FakeSession(requests.Session):
+            @classmethod
+            def request(cls, **kwargs):
+                self.assertEqual(
+                    {'url': url, 'allow_redirects': True, 'method': 'GET',
+                     'headers': {
+                         'User-Agent': 'Cloud-Init/%s' % (
+                             version.version_string())}},
+                    kwargs)
+                return m_response
+
+        with mock.patch(M_PATH + 'requests.Session') as m_session:
+            error = requests.exceptions.HTTPError('broke')
+            m_session.side_effect = [error, FakeSession()]
+            # assert no retries and check_status == True
+            with self.assertRaises(UrlError) as context_manager:
+                response = read_file_or_url(url)
+            self.assertEqual('broke', str(context_manager.exception))
+            # assert default headers, method, url and allow_redirects True
+            # Success on 2nd call with FakeSession
+            response = read_file_or_url(url)
+            self.assertEqual(m_response, response._response)
+
+
+class TestRetryOnUrlExc(CiTestCase):
+
+    def test_do_not_retry_non_urlerror(self):
+        """When exception is not UrlError return False."""
+        myerror = IOError('something unexpected')
+        self.assertFalse(retry_on_url_exc(msg='', exc=myerror))
+
+    def test_perform_retries_on_not_found(self):
+        """When exception is UrlError with a 404 status code return True."""
+        myerror = UrlError(cause=RuntimeError(
+            'something was not found'), code=NOT_FOUND)
+        self.assertTrue(retry_on_url_exc(msg='', exc=myerror))
+
+    def test_perform_retries_on_timeout(self):
+        """When exception is a UrlError wrapping requests.Timeout return
+        True."""
+        myerror = UrlError(cause=requests.Timeout('something timed out'))
+        self.assertTrue(retry_on_url_exc(msg='', exc=myerror))
+
+# vi: ts=4 expandtab
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index bc30c90b..1290cbc6 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,23 +1,1311 @@
 # This file is part of cloud-init. See LICENSE file for license information.
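+# The OS_RELEASE_* and REDHAT_RELEASE_* constants below are verbatim
+# /etc/os-release and /etc/redhat-release payloads used to exercise distro
+# detection.  Roughly (a sketch, assuming util.get_linux_distro() parses
+# these files):
+#
+#   util.get_linux_distro()
+#   # -> e.g. ('sles', '12.3', platform.machine()) given OS_RELEASE_SLES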
-import io +"""Tests for cloudinit.util""" + +import base64 import logging +import json +import platform +import pytest + +import io import os import re import shutil import stat import tempfile -import pytest import yaml from unittest import mock from cloudinit import subp from cloudinit import importer, util -from cloudinit.tests import helpers +from tests.unittests import helpers + + +from tests.unittests.helpers import CiTestCase +from textwrap import dedent + +LOG = logging.getLogger(__name__) + +MOUNT_INFO = [ + '68 0 8:3 / / ro,relatime shared:1 - btrfs /dev/sda1 ro,attr2,inode64', + '153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2', +] + +OS_RELEASE_SLES = dedent( + """\ + NAME="SLES" + VERSION="12-SP3" + VERSION_ID="12.3" + PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3" + ID="sles" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:suse:sles:12:sp3" +""" +) + +OS_RELEASE_OPENSUSE = dedent( + """\ + NAME="openSUSE Leap" + VERSION="42.3" + ID=opensuse + ID_LIKE="suse" + VERSION_ID="42.3" + PRETTY_NAME="openSUSE Leap 42.3" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:leap:42.3" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" +""" +) + +OS_RELEASE_OPENSUSE_L15 = dedent( + """\ + NAME="openSUSE Leap" + VERSION="15.0" + ID="opensuse-leap" + ID_LIKE="suse opensuse" + VERSION_ID="15.0" + PRETTY_NAME="openSUSE Leap 15.0" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:leap:15.0" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" +""" +) + +OS_RELEASE_OPENSUSE_TW = dedent( + """\ + NAME="openSUSE Tumbleweed" + ID="opensuse-tumbleweed" + ID_LIKE="opensuse suse" + VERSION_ID="20180920" + PRETTY_NAME="openSUSE Tumbleweed" + ANSI_COLOR="0;32" + CPE_NAME="cpe:/o:opensuse:tumbleweed:20180920" + BUG_REPORT_URL="https://bugs.opensuse.org" + HOME_URL="https://www.opensuse.org/" +""" +) + +OS_RELEASE_CENTOS = dedent( + """\ + NAME="CentOS Linux" + VERSION="7 (Core)" + ID="centos" + ID_LIKE="rhel fedora" + VERSION_ID="7" + PRETTY_NAME="CentOS Linux 7 (Core)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:centos:centos:7" + HOME_URL="https://www.centos.org/" + BUG_REPORT_URL="https://bugs.centos.org/" + + CENTOS_MANTISBT_PROJECT="CentOS-7" + CENTOS_MANTISBT_PROJECT_VERSION="7" + REDHAT_SUPPORT_PRODUCT="centos" + REDHAT_SUPPORT_PRODUCT_VERSION="7" +""" +) + +OS_RELEASE_REDHAT_7 = dedent( + """\ + NAME="Red Hat Enterprise Linux Server" + VERSION="7.5 (Maipo)" + ID="rhel" + ID_LIKE="fedora" + VARIANT="Server" + VARIANT_ID="server" + VERSION_ID="7.5" + PRETTY_NAME="Red Hat" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:redhat:enterprise_linux:7.5:GA:server" + HOME_URL="https://www.redhat.com/" + BUG_REPORT_URL="https://bugzilla.redhat.com/" + + REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7" + REDHAT_BUGZILLA_PRODUCT_VERSION=7.5 + REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux" + REDHAT_SUPPORT_PRODUCT_VERSION="7.5" +""" +) + +OS_RELEASE_ALMALINUX_8 = dedent( + """\ + NAME="AlmaLinux" + VERSION="8.3 (Purple Manul)" + ID="almalinux" + ID_LIKE="rhel centos fedora" + VERSION_ID="8.3" + PLATFORM_ID="platform:el8" + PRETTY_NAME="AlmaLinux 8.3 (Purple Manul)" + ANSI_COLOR="0;34" + CPE_NAME="cpe:/o:almalinux:almalinux:8.3:GA" + HOME_URL="https://almalinux.org/" + BUG_REPORT_URL="https://bugs.almalinux.org/" + + ALMALINUX_MANTISBT_PROJECT="AlmaLinux-8" + ALMALINUX_MANTISBT_PROJECT_VERSION="8.3" +""" +) + +OS_RELEASE_EUROLINUX_7 = dedent( + """\ + VERSION="7.9 (Minsk)" + ID="eurolinux" + ID_LIKE="rhel scientific centos fedora" + 
VERSION_ID="7.9" + PRETTY_NAME="EuroLinux 7.9 (Minsk)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA" + HOME_URL="http://www.euro-linux.com/" + BUG_REPORT_URL="mailto:support@euro-linux.com" + REDHAT_BUGZILLA_PRODUCT="EuroLinux 7" + REDHAT_BUGZILLA_PRODUCT_VERSION=7.9 + REDHAT_SUPPORT_PRODUCT="EuroLinux" + REDHAT_SUPPORT_PRODUCT_VERSION="7.9" +""" +) + +OS_RELEASE_EUROLINUX_8 = dedent( + """\ + NAME="EuroLinux" + VERSION="8.4 (Vaduz)" + ID="eurolinux" + ID_LIKE="rhel fedora centos" + VERSION_ID="8.4" + PLATFORM_ID="platform:el8" + PRETTY_NAME="EuroLinux 8.4 (Vaduz)" + ANSI_COLOR="0;34" + CPE_NAME="cpe:/o:eurolinux:eurolinux:8" + HOME_URL="https://www.euro-linux.com/" + BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/" + REDHAT_SUPPORT_PRODUCT="EuroLinux" + REDHAT_SUPPORT_PRODUCT_VERSION="8" +""" +) + +OS_RELEASE_ROCKY_8 = dedent( + """\ + NAME="Rocky Linux" + VERSION="8.3 (Green Obsidian)" + ID="rocky" + ID_LIKE="rhel fedora" + VERSION_ID="8.3" + PLATFORM_ID="platform:el8" + PRETTY_NAME="Rocky Linux 8.3 (Green Obsidian)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:rocky:rocky:8" + HOME_URL="https://rockylinux.org/" + BUG_REPORT_URL="https://bugs.rockylinux.org/" + ROCKY_SUPPORT_PRODUCT="Rocky Linux" + ROCKY_SUPPORT_PRODUCT_VERSION="8" +""" +) + +OS_RELEASE_VIRTUOZZO_8 = dedent( + """\ + NAME="Virtuozzo Linux" + VERSION="8" + ID="virtuozzo" + ID_LIKE="rhel fedora" + VERSION_ID="8" + PLATFORM_ID="platform:el8" + PRETTY_NAME="Virtuozzo Linux" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:virtuozzoproject:vzlinux:8" + HOME_URL="https://www.vzlinux.org" + BUG_REPORT_URL="https://bugs.openvz.org" +""" +) + +OS_RELEASE_CLOUDLINUX_8 = dedent( + """\ + NAME="CloudLinux" + VERSION="8.4 (Valery Rozhdestvensky)" + ID="cloudlinux" + ID_LIKE="rhel fedora centos" + VERSION_ID="8.4" + PLATFORM_ID="platform:el8" + PRETTY_NAME="CloudLinux 8.4 (Valery Rozhdestvensky)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:cloudlinux:cloudlinux:8.4:GA:server" + HOME_URL="https://www.cloudlinux.com/" + BUG_REPORT_URL="https://www.cloudlinux.com/support" +""" +) + +OS_RELEASE_OPENEULER_20 = dedent( + """\ + NAME="openEuler" + VERSION="20.03 (LTS-SP2)" + ID="openEuler" + VERSION_ID="20.03" + PRETTY_NAME="openEuler 20.03 (LTS-SP2)" + ANSI_COLOR="0;31" +""" +) + +REDHAT_RELEASE_CENTOS_6 = "CentOS release 6.10 (Final)" +REDHAT_RELEASE_CENTOS_7 = "CentOS Linux release 7.5.1804 (Core)" +REDHAT_RELEASE_REDHAT_6 = ( + "Red Hat Enterprise Linux Server release 6.10 (Santiago)" +) +REDHAT_RELEASE_REDHAT_7 = "Red Hat Enterprise Linux Server release 7.5 (Maipo)" +REDHAT_RELEASE_ALMALINUX_8 = "AlmaLinux release 8.3 (Purple Manul)" +REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)" +REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)" +REDHAT_RELEASE_ROCKY_8 = "Rocky Linux release 8.3 (Green Obsidian)" +REDHAT_RELEASE_VIRTUOZZO_8 = "Virtuozzo Linux release 8" +REDHAT_RELEASE_CLOUDLINUX_8 = "CloudLinux release 8.4 (Valery Rozhdestvensky)" +OS_RELEASE_DEBIAN = dedent( + """\ + PRETTY_NAME="Debian GNU/Linux 9 (stretch)" + NAME="Debian GNU/Linux" + VERSION_ID="9" + VERSION="9 (stretch)" + ID=debian + HOME_URL="https://www.debian.org/" + SUPPORT_URL="https://www.debian.org/support" + BUG_REPORT_URL="https://bugs.debian.org/" +""" +) + +OS_RELEASE_UBUNTU = dedent( + """\ + NAME="Ubuntu"\n + # comment test + VERSION="16.04.3 LTS (Xenial Xerus)"\n + ID=ubuntu\n + ID_LIKE=debian\n + PRETTY_NAME="Ubuntu 16.04.3 LTS"\n + VERSION_ID="16.04"\n + HOME_URL="http://www.ubuntu.com/"\n + 
SUPPORT_URL="http://help.ubuntu.com/"\n + BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n + VERSION_CODENAME=xenial\n + UBUNTU_CODENAME=xenial\n +""" +) + +OS_RELEASE_PHOTON = """\ + NAME="VMware Photon OS" + VERSION="4.0" + ID=photon + VERSION_ID=4.0 + PRETTY_NAME="VMware Photon OS/Linux" + ANSI_COLOR="1;34" + HOME_URL="https://vmware.github.io/photon/" + BUG_REPORT_URL="https://github.com/vmware/photon/issues" +""" + + +class FakeCloud(object): + def __init__(self, hostname, fqdn): + self.hostname = hostname + self.fqdn = fqdn + self.calls = [] + + def get_hostname(self, fqdn=None, metadata_only=None): + myargs = {} + if fqdn is not None: + myargs['fqdn'] = fqdn + if metadata_only is not None: + myargs['metadata_only'] = metadata_only + self.calls.append(myargs) + if fqdn: + return self.fqdn + return self.hostname + + +class TestUtil(CiTestCase): + def test_parse_mount_info_no_opts_no_arg(self): + result = util.parse_mount_info('/home', MOUNT_INFO, LOG) + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) + + def test_parse_mount_info_no_opts_arg(self): + result = util.parse_mount_info('/home', MOUNT_INFO, LOG, False) + self.assertEqual(('/dev/sda2', 'xfs', '/home'), result) + + def test_parse_mount_info_with_opts(self): + result = util.parse_mount_info('/', MOUNT_INFO, LOG, True) + self.assertEqual(('/dev/sda1', 'btrfs', '/', 'ro,relatime'), result) + + @mock.patch('cloudinit.util.get_mount_info') + def test_mount_is_rw(self, m_mount_info): + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'rw,relatime') + is_rw = util.mount_is_read_write('/') + self.assertEqual(is_rw, True) + + @mock.patch('cloudinit.util.get_mount_info') + def test_mount_is_ro(self, m_mount_info): + m_mount_info.return_value = ('/dev/sda1', 'btrfs', '/', 'ro,relatime') + is_rw = util.mount_is_read_write('/') + self.assertEqual(is_rw, False) + + +class TestUptime(CiTestCase): + @mock.patch('cloudinit.util.boottime') + @mock.patch('cloudinit.util.os.path.exists') + @mock.patch('cloudinit.util.time.time') + def test_uptime_non_linux_path(self, m_time, m_exists, m_boottime): + boottime = 1000.0 + uptime = 10.0 + m_boottime.return_value = boottime + m_time.return_value = boottime + uptime + m_exists.return_value = False + result = util.uptime() + self.assertEqual(str(uptime), result) + + +class TestShellify(CiTestCase): + def test_input_dict_raises_type_error(self): + self.assertRaisesRegex( + TypeError, + 'Input.*was.*dict.*xpected', + util.shellify, + {'mykey': 'myval'}, + ) + def test_input_str_raises_type_error(self): + self.assertRaisesRegex( + TypeError, 'Input.*was.*str.*xpected', util.shellify, "foobar" + ) -class FakeSelinux(object): + def test_value_with_int_raises_type_error(self): + self.assertRaisesRegex( + TypeError, 'shellify.*int', util.shellify, ["foo", 1] + ) + + def test_supports_strings_and_lists(self): + self.assertEqual( + '\n'.join( + [ + "#!/bin/sh", + "echo hi mom", + "'echo' 'hi dad'", + "'echo' 'hi' 'sis'", + "", + ] + ), + util.shellify( + ["echo hi mom", ["echo", "hi dad"], ('echo', 'hi', 'sis')] + ), + ) + + def test_supports_comments(self): + self.assertEqual( + '\n'.join(["#!/bin/sh", "echo start", "echo end", ""]), + util.shellify(["echo start", None, "echo end"]), + ) + + +class TestGetHostnameFqdn(CiTestCase): + def test_get_hostname_fqdn_from_only_cfg_fqdn(self): + """When cfg only has the fqdn key, derive hostname and fqdn from it.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'fqdn': 'myhost.domain.com'}, cloud=None + ) + self.assertEqual('myhost', hostname) + 
self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): + """When cfg has both fqdn and hostname keys, return them.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None + ) + self.assertEqual('other', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): + """When cfg has only hostname key which represents a fqdn, use that.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'hostname': 'myhost.domain.com'}, cloud=None + ) + self.assertEqual('myhost', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): + """When cfg has a hostname without a '.' query cloud.get_hostname.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn( + cfg={'hostname': 'myhost'}, cloud=mycloud + ) + self.assertEqual('myhost', hostname) + self.assertEqual('cloudhost.mycloud.com', fqdn) + self.assertEqual( + [{'fqdn': True, 'metadata_only': False}], mycloud.calls + ) + + def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): + """When cfg has neither hostname nor fqdn cloud.get_hostname.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud) + self.assertEqual('cloudhost', hostname) + self.assertEqual('cloudhost.mycloud.com', fqdn) + self.assertEqual( + [{'fqdn': True, 'metadata_only': False}, {'metadata_only': False}], + mycloud.calls, + ) + + def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): + """Calls to cloud.get_hostname pass the metadata_only parameter.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + _hn, _fqdn = util.get_hostname_fqdn( + cfg={}, cloud=mycloud, metadata_only=True + ) + self.assertEqual( + [{'fqdn': True, 'metadata_only': True}, {'metadata_only': True}], + mycloud.calls, + ) + + +class TestBlkid(CiTestCase): + ids = { + "id01": "1111-1111", + "id02": "22222222-2222", + "id03": "33333333-3333", + "id04": "44444444-4444", + "id05": "55555555-5555-5555-5555-555555555555", + "id06": "66666666-6666-6666-6666-666666666666", + "id07": "52894610484658920398", + "id08": "86753098675309867530", + "id09": "99999999-9999-9999-9999-999999999999", + } + + blkid_out = dedent( + """\ + /dev/loop0: TYPE="squashfs" + /dev/loop1: TYPE="squashfs" + /dev/loop2: TYPE="squashfs" + /dev/loop3: TYPE="squashfs" + /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" + /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" + /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" + /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ + """TYPE="zfs_member" PARTUUID="{id09}" + /dev/loop4: TYPE="squashfs" + """ + ) + + maxDiff = None + + def _get_expected(self): + return { + "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, + "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, + "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"}, + "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, + "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"}, + "/dev/sda1": { + "DEVNAME": "/dev/sda1", + "TYPE": "vfat", + "UUID": self.ids["id01"], + "PARTUUID": self.ids["id02"], + }, + "/dev/sda2": { + "DEVNAME": "/dev/sda2", + "TYPE": "ext4", + "UUID": self.ids["id03"], + "PARTUUID": self.ids["id04"], + }, + "/dev/sda3": { + "DEVNAME": "/dev/sda3", + "TYPE": "ext4", + 
"UUID": self.ids["id05"], + "PARTUUID": self.ids["id06"], + }, + "/dev/sda4": { + "DEVNAME": "/dev/sda4", + "TYPE": "zfs_member", + "LABEL": "default", + "UUID": self.ids["id07"], + "UUID_SUB": self.ids["id08"], + "PARTUUID": self.ids["id09"], + }, + } + + @mock.patch("cloudinit.subp.subp") + def test_functional_blkid(self, m_subp): + m_subp.return_value = (self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), util.blkid()) + m_subp.assert_called_with( + ["blkid", "-o", "full"], capture=True, decode="replace" + ) + + @mock.patch("cloudinit.subp.subp") + def test_blkid_no_cache_uses_no_cache(self, m_subp): + """blkid should turn off cache if disable_cache is true.""" + m_subp.return_value = (self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), util.blkid(disable_cache=True)) + m_subp.assert_called_with( + ["blkid", "-o", "full", "-c", "/dev/null"], + capture=True, + decode="replace", + ) + + +@mock.patch('cloudinit.subp.subp') +class TestUdevadmSettle(CiTestCase): + def test_with_no_params(self, m_subp): + """called with no parameters.""" + util.udevadm_settle() + m_subp.called_once_with(mock.call(['udevadm', 'settle'])) + + def test_with_exists_and_not_exists(self, m_subp): + """with exists=file where file does not exist should invoke subp.""" + mydev = self.tmp_path("mydev") + util.udevadm_settle(exists=mydev) + m_subp.called_once_with( + ['udevadm', 'settle', '--exit-if-exists=%s' % mydev] + ) + + def test_with_exists_and_file_exists(self, m_subp): + """with exists=file where file does exist should not invoke subp.""" + mydev = self.tmp_path("mydev") + util.write_file(mydev, "foo\n") + util.udevadm_settle(exists=mydev) + self.assertIsNone(m_subp.call_args) + + def test_with_timeout_int(self, m_subp): + """timeout can be an integer.""" + timeout = 9 + util.udevadm_settle(timeout=timeout) + m_subp.called_once_with( + ['udevadm', 'settle', '--timeout=%s' % timeout] + ) + + def test_with_timeout_string(self, m_subp): + """timeout can be a string.""" + timeout = "555" + util.udevadm_settle(timeout=timeout) + m_subp.assert_called_once_with( + ['udevadm', 'settle', '--timeout=%s' % timeout] + ) + + def test_with_exists_and_timeout(self, m_subp): + """test call with both exists and timeout.""" + mydev = self.tmp_path("mydev") + timeout = "3" + util.udevadm_settle(exists=mydev) + m_subp.called_once_with( + [ + 'udevadm', + 'settle', + '--exit-if-exists=%s' % mydev, + '--timeout=%s' % timeout, + ] + ) + + def test_subp_exception_raises_to_caller(self, m_subp): + m_subp.side_effect = subp.ProcessExecutionError("BOOM") + self.assertRaises(subp.ProcessExecutionError, util.udevadm_settle) + + +@mock.patch('os.path.exists') +class TestGetLinuxDistro(CiTestCase): + def setUp(self): + # python2 has no lru_cache, and therefore, no cache_clear() + if hasattr(util.get_linux_distro, "cache_clear"): + util.get_linux_distro.cache_clear() + + @classmethod + def os_release_exists(self, path): + """Side effect function""" + if path == '/etc/os-release': + return 1 + + @classmethod + def redhat_release_exists(self, path): + """Side effect function""" + if path == '/etc/redhat-release': + return 1 + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists): + """Verify we get the correct name if the os-release file has + the distro name in quotes""" + m_os_release.return_value = OS_RELEASE_SLES + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + 
self.assertEqual(('sles', '12.3', platform.machine()), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists): + """Verify we get the correct name if the os-release file does not + have the distro name in quotes""" + m_os_release.return_value = OS_RELEASE_UBUNTU + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('ubuntu', '16.04', 'xenial'), dist) + + @mock.patch('platform.system') + @mock.patch('platform.release') + @mock.patch('cloudinit.util._parse_redhat_release') + def test_get_linux_freebsd( + self, + m_parse_redhat_release, + m_platform_release, + m_platform_system, + m_path_exists, + ): + """Verify we get the correct name and release name on FreeBSD.""" + m_path_exists.return_value = False + m_platform_release.return_value = '12.0-RELEASE-p10' + m_platform_system.return_value = 'FreeBSD' + m_parse_redhat_release.return_value = {} + util.is_BSD.cache_clear() + dist = util.get_linux_distro() + self.assertEqual(('freebsd', '12.0-RELEASE-p10', ''), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_centos6(self, m_os_release, m_path_exists): + """Verify we get the correct name and release name on CentOS 6.""" + m_os_release.return_value = REDHAT_RELEASE_CENTOS_6 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('centos', '6.10', 'Final'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_centos7_redhat_release(self, m_os_release, m_exists): + """Verify the correct release info on CentOS 7 without os-release.""" + m_os_release.return_value = REDHAT_RELEASE_CENTOS_7 + m_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('centos', '7.5.1804', 'Core'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_redhat7_osrelease(self, m_os_release, m_path_exists): + """Verify redhat 7 read from os-release.""" + m_os_release.return_value = OS_RELEASE_REDHAT_7 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('redhat', '7.5', 'Maipo'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_redhat7_rhrelease(self, m_os_release, m_path_exists): + """Verify redhat 7 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_REDHAT_7 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('redhat', '7.5', 'Maipo'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_redhat6_rhrelease(self, m_os_release, m_path_exists): + """Verify redhat 6 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_REDHAT_6 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('redhat', '6.10', 'Santiago'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_copr_centos(self, m_os_release, m_path_exists): + """Verify we get the correct name and release name on COPR CentOS.""" + m_os_release.return_value = OS_RELEASE_CENTOS + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('centos', '7', 'Core'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_almalinux8_rhrelease(self, m_os_release, m_path_exists): + """Verify 
almalinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_ALMALINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_almalinux8_osrelease(self, m_os_release, m_path_exists): + """Verify almalinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_ALMALINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 7 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 7 read from os-release.""" + m_os_release.return_value = OS_RELEASE_EUROLINUX_7 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_EUROLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): + """Verify rocky linux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_ROCKY_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_rocky8_osrelease(self, m_os_release, m_path_exists): + """Verify rocky linux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_ROCKY_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('rocky', '8.3', 'Green Obsidian'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_virtuozzo8_rhrelease(self, m_os_release, m_path_exists): + """Verify virtuozzo linux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_VIRTUOZZO_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_virtuozzo8_osrelease(self, m_os_release, m_path_exists): + """Verify virtuozzo linux 8 read from os-release.""" + 
m_os_release.return_value = OS_RELEASE_VIRTUOZZO_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('virtuozzo', '8', 'Virtuozzo Linux'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_cloud8_rhrelease(self, m_os_release, m_path_exists): + """Verify cloudlinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_CLOUDLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_cloud8_osrelease(self, m_os_release, m_path_exists): + """Verify cloudlinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_CLOUDLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('cloudlinux', '8.4', 'Valery Rozhdestvensky'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_debian(self, m_os_release, m_path_exists): + """Verify we get the correct name and release name on Debian.""" + m_os_release.return_value = OS_RELEASE_DEBIAN + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('debian', '9', 'stretch'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_openeuler(self, m_os_release, m_path_exists): + """Verify get the correct name and release name on Openeuler.""" + m_os_release.return_value = OS_RELEASE_OPENEULER_20 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('openEuler', '20.03', 'LTS-SP2'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_opensuse(self, m_os_release, m_path_exists): + """Verify we get the correct name and machine arch on openSUSE + prior to openSUSE Leap 15. + """ + m_os_release.return_value = OS_RELEASE_OPENSUSE + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('opensuse', '42.3', platform.machine()), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_opensuse_l15(self, m_os_release, m_path_exists): + """Verify we get the correct name and machine arch on openSUSE + for openSUSE Leap 15.0 and later. 
+ """ + m_os_release.return_value = OS_RELEASE_OPENSUSE_L15 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('opensuse-leap', '15.0', platform.machine()), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_opensuse_tw(self, m_os_release, m_path_exists): + """Verify we get the correct name and machine arch on openSUSE + for openSUSE Tumbleweed + """ + m_os_release.return_value = OS_RELEASE_OPENSUSE_TW + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual( + ('opensuse-tumbleweed', '20180920', platform.machine()), dist + ) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_photon_os_release(self, m_os_release, m_path_exists): + """Verify we get the correct name and machine arch on PhotonOS""" + m_os_release.return_value = OS_RELEASE_PHOTON + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('photon', '4.0', 'VMware Photon OS/Linux'), dist) + + @mock.patch('platform.system') + @mock.patch('platform.dist', create=True) + def test_get_linux_distro_no_data( + self, m_platform_dist, m_platform_system, m_path_exists + ): + """Verify we get no information if os-release does not exist""" + m_platform_dist.return_value = ('', '', '') + m_platform_system.return_value = "Linux" + m_path_exists.return_value = 0 + dist = util.get_linux_distro() + self.assertEqual(('', '', ''), dist) + + @mock.patch('platform.system') + @mock.patch('platform.dist', create=True) + def test_get_linux_distro_no_impl( + self, m_platform_dist, m_platform_system, m_path_exists + ): + """Verify we get an empty tuple when no information exists and + Exceptions are not propagated""" + m_platform_dist.side_effect = Exception() + m_platform_system.return_value = "Linux" + m_path_exists.return_value = 0 + dist = util.get_linux_distro() + self.assertEqual(('', '', ''), dist) + + @mock.patch('platform.system') + @mock.patch('platform.dist', create=True) + def test_get_linux_distro_plat_data( + self, m_platform_dist, m_platform_system, m_path_exists + ): + """Verify we get the correct platform information""" + m_platform_dist.return_value = ('foo', '1.1', 'aarch64') + m_platform_system.return_value = "Linux" + m_path_exists.return_value = 0 + dist = util.get_linux_distro() + self.assertEqual(('foo', '1.1', 'aarch64'), dist) + + +class TestGetVariant: + @pytest.mark.parametrize( + 'info, expected_variant', + [ + ({'system': 'Linux', 'dist': ('almalinux',)}, 'almalinux'), + ({'system': 'linux', 'dist': ('alpine',)}, 'alpine'), + ({'system': 'linux', 'dist': ('arch',)}, 'arch'), + ({'system': 'linux', 'dist': ('centos',)}, 'centos'), + ({'system': 'linux', 'dist': ('cloudlinux',)}, 'cloudlinux'), + ({'system': 'linux', 'dist': ('debian',)}, 'debian'), + ({'system': 'linux', 'dist': ('eurolinux',)}, 'eurolinux'), + ({'system': 'linux', 'dist': ('fedora',)}, 'fedora'), + ({'system': 'linux', 'dist': ('openEuler',)}, 'openeuler'), + ({'system': 'linux', 'dist': ('photon',)}, 'photon'), + ({'system': 'linux', 'dist': ('rhel',)}, 'rhel'), + ({'system': 'linux', 'dist': ('rocky',)}, 'rocky'), + ({'system': 'linux', 'dist': ('suse',)}, 'suse'), + ({'system': 'linux', 'dist': ('virtuozzo',)}, 'virtuozzo'), + ({'system': 'linux', 'dist': ('ubuntu',)}, 'ubuntu'), + ({'system': 'linux', 'dist': ('linuxmint',)}, 'ubuntu'), + ({'system': 'linux', 'dist': ('mint',)}, 'ubuntu'), + ({'system': 'linux', 'dist': 
('redhat',)}, 'rhel'), + ({'system': 'linux', 'dist': ('opensuse',)}, 'suse'), + ({'system': 'linux', 'dist': ('opensuse-tumbleweed',)}, 'suse'), + ({'system': 'linux', 'dist': ('opensuse-leap',)}, 'suse'), + ({'system': 'linux', 'dist': ('sles',)}, 'suse'), + ({'system': 'linux', 'dist': ('sle_hpc',)}, 'suse'), + ({'system': 'linux', 'dist': ('my_distro',)}, 'linux'), + ({'system': 'Windows', 'dist': ('dontcare',)}, 'windows'), + ({'system': 'Darwin', 'dist': ('dontcare',)}, 'darwin'), + ({'system': 'Freebsd', 'dist': ('dontcare',)}, 'freebsd'), + ({'system': 'Netbsd', 'dist': ('dontcare',)}, 'netbsd'), + ({'system': 'Openbsd', 'dist': ('dontcare',)}, 'openbsd'), + ({'system': 'Dragonfly', 'dist': ('dontcare',)}, 'dragonfly'), + ], + ) + def test_get_variant(self, info, expected_variant): + """Verify we get the correct variant name""" + assert util._get_variant(info) == expected_variant + + +class TestJsonDumps(CiTestCase): + def test_is_str(self): + """json_dumps should return a string.""" + self.assertTrue(isinstance(util.json_dumps({'abc': '123'}), str)) + + def test_utf8(self): + smiley = '\\ud83d\\ude03' + self.assertEqual( + {'smiley': smiley}, json.loads(util.json_dumps({'smiley': smiley})) + ) + + def test_non_utf8(self): + blob = b'\xba\x03Qx-#y\xea' + self.assertEqual( + {'blob': 'ci-b64:' + base64.b64encode(blob).decode('utf-8')}, + json.loads(util.json_dumps({'blob': blob})), + ) + + +@mock.patch('os.path.exists') +class TestIsLXD(CiTestCase): + def test_is_lxd_true_on_sock_device(self, m_exists): + """When lxd's /dev/lxd/sock exists, is_lxd returns true.""" + m_exists.return_value = True + self.assertTrue(util.is_lxd()) + m_exists.assert_called_once_with('/dev/lxd/sock') + def test_is_lxd_false_when_sock_device_absent(self, m_exists): + """When lxd's /dev/lxd/sock is absent, is_lxd returns false.""" + m_exists.return_value = False + self.assertFalse(util.is_lxd()) + m_exists.assert_called_once_with('/dev/lxd/sock') + + +class TestReadCcFromCmdline: + @pytest.mark.parametrize( + "cmdline,expected_cfg", + [ + # Return None if cmdline has no cc:end_cc content. + (CiTestCase.random_string(), None), + # Return None if YAML content is empty string. + ('foo cc: end_cc bar', None), + # Return expected dictionary without trailing end_cc marker. + ('foo cc: ssh_pwauth: true', {'ssh_pwauth': True}), + # Return expected dictionary w escaped newline and no end_cc. + ('foo cc: ssh_pwauth: true\\n', {'ssh_pwauth': True}), + # Return expected dictionary of yaml between cc: and end_cc. + ('foo cc: ssh_pwauth: true end_cc bar', {'ssh_pwauth': True}), + # Return dict with list value w escaped newline, no end_cc. + ( + 'cc: ssh_import_id: [smoser, kirkland]\\n', + {'ssh_import_id': ['smoser', 'kirkland']}, + ), + # Parse urlencoded brackets in yaml content. + ( + 'cc: ssh_import_id: %5Bsmoser, kirkland%5D end_cc', + {'ssh_import_id': ['smoser', 'kirkland']}, + ), + # Parse complete urlencoded yaml content. + ( + 'cc: ssh_import_id%3A%20%5Buser1%2C%20user2%5D end_cc', + {'ssh_import_id': ['user1', 'user2']}, + ), + # Parse nested dictionary in yaml content. + ( + 'cc: ntp: {enabled: true, ntp_client: myclient} end_cc', + {'ntp': {'enabled': True, 'ntp_client': 'myclient'}}, + ), + # Parse single mapping value in yaml content. + ('cc: ssh_import_id: smoser end_cc', {'ssh_import_id': 'smoser'}), + # Parse multiline content with multiple mapping and nested lists. 
+ ( + ( + 'cc: ssh_import_id: [smoser, bob]\\n' + 'runcmd: [ [ ls, -l ], echo hi ] end_cc' + ), + { + 'ssh_import_id': ['smoser', 'bob'], + 'runcmd': [['ls', '-l'], 'echo hi'], + }, + ), + # Parse multiline encoded content w/ mappings and nested lists. + ( + ( + 'cc: ssh_import_id: %5Bsmoser, bob%5D\\n' + 'runcmd: [ [ ls, -l ], echo hi ] end_cc' + ), + { + 'ssh_import_id': ['smoser', 'bob'], + 'runcmd': [['ls', '-l'], 'echo hi'], + }, + ), + # test encoded escaped newlines work. + # + # unquote(encoded_content) + # 'ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ]' + ( + ( + 'cc: ' + + ( + 'ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%5Cn' + 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' + '%20echo%20hi%20%5D' + ) + + ' end_cc' + ), + { + 'ssh_import_id': ['smoser', 'bob'], + 'runcmd': [['ls', '-l'], 'echo hi'], + }, + ), + # test encoded newlines work. + # + # unquote(encoded_content) + # 'ssh_import_id: [smoser, bob]\nruncmd: [ [ ls, -l ], echo hi ]' + ( + ( + "cc: " + + ( + 'ssh_import_id%3A%20%5Bsmoser%2C%20bob%5D%0A' + 'runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%2C' + '%20echo%20hi%20%5D' + ) + + ' end_cc' + ), + { + 'ssh_import_id': ['smoser', 'bob'], + 'runcmd': [['ls', '-l'], 'echo hi'], + }, + ), + # Parse and merge multiple yaml content sections. + ( + ( + 'cc:ssh_import_id: [smoser, bob] end_cc ' + 'cc: runcmd: [ [ ls, -l ] ] end_cc' + ), + {'ssh_import_id': ['smoser', 'bob'], 'runcmd': [['ls', '-l']]}, + ), + # Parse and merge multiple encoded yaml content sections. + ( + ( + 'cc:ssh_import_id%3A%20%5Bsmoser%5D end_cc ' + 'cc:runcmd%3A%20%5B%20%5B%20ls%2C%20-l%20%5D%20%5D end_cc' + ), + {'ssh_import_id': ['smoser'], 'runcmd': [['ls', '-l']]}, + ), + ], + ) + def test_read_conf_from_cmdline_config(self, expected_cfg, cmdline): + assert expected_cfg == util.read_conf_from_cmdline(cmdline=cmdline) + + +class TestMountCb: + """Tests for ``util.mount_cb``. + + These tests consider the "unit" under test to be ``util.mount_cb`` and + ``util.unmounter``, which is only used by ``mount_cb``. 
+ + TODO: Test default mtype determination + TODO: Test the if/else branch that actually performs the mounting operation + """ + + @pytest.yield_fixture + def already_mounted_device_and_mountdict(self): + """Mock an already-mounted device, and yield (device, mount dict)""" + device = "/dev/fake0" + mountpoint = "/mnt/fake" + with mock.patch("cloudinit.util.subp.subp"): + with mock.patch("cloudinit.util.mounts") as m_mounts: + mounts = {device: {"mountpoint": mountpoint}} + m_mounts.return_value = mounts + yield device, mounts[device] + + @pytest.fixture + def already_mounted_device(self, already_mounted_device_and_mountdict): + """already_mounted_device_and_mountdict, but return only the device""" + return already_mounted_device_and_mountdict[0] + + @pytest.mark.parametrize( + "mtype,expected", + [ + # While the filesystem is called iso9660, the mount type is cd9660 + ("iso9660", "cd9660"), + # vfat is generally called "msdos" on BSD + ("vfat", "msdos"), + # judging from man pages, only FreeBSD has this alias + ("msdosfs", "msdos"), + # Test happy path + ("ufs", "ufs"), + ], + ) + @mock.patch("cloudinit.util.is_Linux", autospec=True) + @mock.patch("cloudinit.util.is_BSD", autospec=True) + @mock.patch("cloudinit.util.subp.subp") + @mock.patch("cloudinit.temp_utils.tempdir", autospec=True) + def test_normalize_mtype_on_bsd( + self, m_tmpdir, m_subp, m_is_BSD, m_is_Linux, mtype, expected + ): + m_is_BSD.return_value = True + m_is_Linux.return_value = False + m_tmpdir.return_value.__enter__ = mock.Mock( + autospec=True, return_value="/tmp/fake" + ) + m_tmpdir.return_value.__exit__ = mock.Mock( + autospec=True, return_value=True + ) + callback = mock.Mock(autospec=True) + + util.mount_cb('/dev/fake0', callback, mtype=mtype) + assert ( + mock.call( + [ + "mount", + "-o", + "ro", + "-t", + expected, + "/dev/fake0", + "/tmp/fake", + ], + update_env=None, + ) + in m_subp.call_args_list + ) + + @pytest.mark.parametrize("invalid_mtype", [int(0), float(0.0), dict()]) + def test_typeerror_raised_for_invalid_mtype(self, invalid_mtype): + with pytest.raises(TypeError): + util.mount_cb(mock.Mock(), mock.Mock(), mtype=invalid_mtype) + + @mock.patch("cloudinit.util.subp.subp") + def test_already_mounted_does_not_mount_or_umount_anything( + self, m_subp, already_mounted_device + ): + util.mount_cb(already_mounted_device, mock.Mock()) + + assert 0 == m_subp.call_count + + @pytest.mark.parametrize("trailing_slash_in_mounts", ["/", ""]) + def test_already_mounted_calls_callback( + self, trailing_slash_in_mounts, already_mounted_device_and_mountdict + ): + device, mount_dict = already_mounted_device_and_mountdict + mountpoint = mount_dict["mountpoint"] + mount_dict["mountpoint"] += trailing_slash_in_mounts + + callback = mock.Mock() + util.mount_cb(device, callback) + + # The mountpoint passed to callback should always have a trailing + # slash, regardless of the input + assert [mock.call(mountpoint + "/")] == callback.call_args_list + + def test_already_mounted_calls_callback_with_data( + self, already_mounted_device + ): + callback = mock.Mock() + util.mount_cb( + already_mounted_device, callback, data=mock.sentinel.data + ) + + assert [ + mock.call(mock.ANY, mock.sentinel.data) + ] == callback.call_args_list + + +@mock.patch("cloudinit.util.write_file") +class TestEnsureFile: + """Tests for ``cloudinit.util.ensure_file``.""" + + def test_parameters_passed_through(self, m_write_file): + """Test the parameters in the signature are passed to write_file.""" + util.ensure_file( + mock.sentinel.path, + 
mode=mock.sentinel.mode, + preserve_mode=mock.sentinel.preserve_mode, + ) + + assert 1 == m_write_file.call_count + args, kwargs = m_write_file.call_args + assert (mock.sentinel.path,) == args + assert mock.sentinel.mode == kwargs["mode"] + assert mock.sentinel.preserve_mode == kwargs["preserve_mode"] + + @pytest.mark.parametrize( + "kwarg,expected", + [ + # Files should be world-readable by default + ("mode", 0o644), + # The previous behaviour of not preserving mode should be retained + ("preserve_mode", False), + ], + ) + def test_defaults(self, m_write_file, kwarg, expected): + """Test that ensure_file defaults appropriately.""" + util.ensure_file(mock.sentinel.path) + + assert 1 == m_write_file.call_count + _args, kwargs = m_write_file.call_args + assert expected == kwargs[kwarg] + + def test_static_parameters_are_passed(self, m_write_file): + """Test that the static write_files parameters are passed correctly.""" + util.ensure_file(mock.sentinel.path) + + assert 1 == m_write_file.call_count + _args, kwargs = m_write_file.call_args + assert "" == kwargs["content"] + assert "ab" == kwargs["omode"] + + +@mock.patch("cloudinit.util.grp.getgrnam") +@mock.patch("cloudinit.util.os.setgid") +@mock.patch("cloudinit.util.os.umask") +class TestRedirectOutputPreexecFn: + """This tests specifically the preexec_fn used in redirect_output.""" + + @pytest.fixture(params=["outfmt", "errfmt"]) + def preexec_fn(self, request): + """A fixture to gather the preexec_fn used by redirect_output. + + This enables simpler direct testing of it, and parameterises any tests + using it to cover both the stdout and stderr code paths. + """ + test_string = "| piped output to invoke subprocess" + if request.param == "outfmt": + args = (test_string, None) + elif request.param == "errfmt": + args = (None, test_string) + with mock.patch("cloudinit.util.subprocess.Popen") as m_popen: + util.redirect_output(*args) + + assert 1 == m_popen.call_count + _args, kwargs = m_popen.call_args + assert "preexec_fn" in kwargs, "preexec_fn not passed to Popen" + return kwargs["preexec_fn"] + + def test_preexec_fn_sets_umask( + self, m_os_umask, _m_setgid, _m_getgrnam, preexec_fn + ): + """preexec_fn should set a mask that avoids world-readable files.""" + preexec_fn() + + assert [mock.call(0o037)] == m_os_umask.call_args_list + + def test_preexec_fn_sets_group_id_if_adm_group_present( + self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn + ): + """We should setgrp to adm if present, so files are owned by them.""" + fake_group = mock.Mock(gr_gid=mock.sentinel.gr_gid) + m_getgrnam.return_value = fake_group + + preexec_fn() + + assert [mock.call("adm")] == m_getgrnam.call_args_list + assert [mock.call(mock.sentinel.gr_gid)] == m_setgid.call_args_list + + def test_preexec_fn_handles_absent_adm_group_gracefully( + self, _m_os_umask, m_setgid, m_getgrnam, preexec_fn + ): + """We should handle an absent adm group gracefully.""" + m_getgrnam.side_effect = KeyError("getgrnam(): name not found: 'adm'") + + preexec_fn() + + assert 0 == m_setgid.call_count + + +class FakeSelinux(object): def __init__(self, match_what): self.match_what = match_what self.restored = [] @@ -175,8 +1463,9 @@ class TestWriteFile(helpers.TestCase): fake_se = FakeSelinux(my_file) - with mock.patch.object(importer, 'import_module', - return_value=fake_se) as mockobj: + with mock.patch.object( + importer, 'import_module', return_value=fake_se + ) as mockobj: with util.SeLinuxGuard(my_file) as is_on: self.assertTrue(is_on) @@ -261,8 +1550,9 @@ class 
TestKeyValStrings(helpers.TestCase): class TestGetCmdline(helpers.TestCase): def test_cmdline_reads_debug_env(self): - with mock.patch.dict("os.environ", - values={'DEBUG_PROC_CMDLINE': 'abcd 123'}): + with mock.patch.dict( + "os.environ", values={'DEBUG_PROC_CMDLINE': 'abcd 123'} + ): ret = util.get_cmdline() self.assertEqual("abcd 123", ret) @@ -279,52 +1569,68 @@ class TestLoadYaml(helpers.CiTestCase): '''Any unallowed types result in returning default; log the issue.''' # for now, anything not in the allowed list just returns the default. myyaml = yaml.dump({'1': "one"}) - self.assertEqual(util.load_yaml(blob=myyaml, - default=self.mydefault, - allowed=(str,)), - self.mydefault) + self.assertEqual( + util.load_yaml( + blob=myyaml, default=self.mydefault, allowed=(str,) + ), + self.mydefault, + ) regex = re.compile( r'Yaml load allows \(<(class|type) \'str\'>,\) root types, but' - r' got dict') - self.assertTrue(regex.search(self.logs.getvalue()), - msg='Missing expected yaml load error') + r' got dict' + ) + self.assertTrue( + regex.search(self.logs.getvalue()), + msg='Missing expected yaml load error', + ) def test_bogus_scan_error_returns_default(self): '''On Yaml scan error, load_yaml returns the default and logs issue.''' badyaml = "1\n 2:" - self.assertEqual(util.load_yaml(blob=badyaml, - default=self.mydefault), - self.mydefault) + self.assertEqual( + util.load_yaml(blob=badyaml, default=self.mydefault), + self.mydefault, + ) self.assertIn( 'Failed loading yaml blob. Invalid format at line 2 column 3:' ' "mapping values are not allowed here', - self.logs.getvalue()) + self.logs.getvalue(), + ) def test_bogus_parse_error_returns_default(self): '''On Yaml parse error, load_yaml returns default and logs issue.''' badyaml = "{}}" - self.assertEqual(util.load_yaml(blob=badyaml, - default=self.mydefault), - self.mydefault) + self.assertEqual( + util.load_yaml(blob=badyaml, default=self.mydefault), + self.mydefault, + ) self.assertIn( 'Failed loading yaml blob. 
Invalid format at line 1 column 3:' " \"expected \'\', but found \'}\'", - self.logs.getvalue()) + self.logs.getvalue(), + ) def test_unsafe_types(self): # should not load complex types - unsafe_yaml = yaml.dump((1, 2, 3,)) - self.assertEqual(util.load_yaml(blob=unsafe_yaml, - default=self.mydefault), - self.mydefault) + unsafe_yaml = yaml.dump( + ( + 1, + 2, + 3, + ) + ) + self.assertEqual( + util.load_yaml(blob=unsafe_yaml, default=self.mydefault), + self.mydefault, + ) def test_python_unicode(self): # complex type of python/unicode is explicitly allowed myobj = {'1': "FOOBAR"} safe_yaml = yaml.dump(myobj) - self.assertEqual(util.load_yaml(blob=safe_yaml, - default=self.mydefault), - myobj) + self.assertEqual( + util.load_yaml(blob=safe_yaml, default=self.mydefault), myobj + ) def test_none_returns_default(self): """If yaml.load returns None, then default should be returned.""" @@ -332,13 +1638,16 @@ class TestLoadYaml(helpers.CiTestCase): mdef = self.mydefault self.assertEqual( [(b, self.mydefault) for b in blobs], - [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs]) + [(b, util.load_yaml(blob=b, default=mdef)) for b in blobs], + ) class TestMountinfoParsing(helpers.ResourceUsingTestCase): def test_invalid_mountinfo(self): - line = ("20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root" - "rw,errors=remount-ro,data=ordered") + line = ( + "20 1 252:1 / / rw,relatime - ext4 /dev/mapper/vg0-root" + "rw,errors=remount-ro,data=ordered" + ) elements = line.split() for i in range(len(elements) + 1): lines = [' '.join(elements[0:i])] @@ -398,7 +1707,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): m_os.path.exists.return_value = True # mock subp command from util.get_mount_info_fs_on_zpool zpool_output.return_value = ( - helpers.readResource('zpool_status_simple.txt'), '' + helpers.readResource('zpool_status_simple.txt'), + '', ) # save function return values and do asserts ret = util.get_device_info_from_zpool('vmzroot') @@ -431,7 +1741,8 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): m_os.path.exists.return_value = True # mock subp command from util.get_mount_info_fs_on_zpool zpool_output.return_value = ( - helpers.readResource('zpool_status_simple.txt'), 'error' + helpers.readResource('zpool_status_simple.txt'), + 'error', ) # save function return values and do asserts ret = util.get_device_info_from_zpool('vmzroot') @@ -440,7 +1751,9 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): @mock.patch('cloudinit.subp.subp') def test_parse_mount_with_ext(self, mount_out): mount_out.return_value = ( - helpers.readResource('mount_parse_ext.txt'), '') + helpers.readResource('mount_parse_ext.txt'), + '', + ) # this one is valid and exists in mount_parse_ext.txt ret = util.parse_mount('/var') self.assertEqual(('/dev/mapper/vg00-lv_var', 'ext4', '/var'), ret) @@ -457,7 +1770,9 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): @mock.patch('cloudinit.subp.subp') def test_parse_mount_with_zfs(self, mount_out): mount_out.return_value = ( - helpers.readResource('mount_parse_zfs.txt'), '') + helpers.readResource('mount_parse_zfs.txt'), + '', + ) # this one is valid and exists in mount_parse_zfs.txt ret = util.parse_mount('/var') self.assertEqual(('vmzroot/ROOT/freebsd/var', 'zfs', '/var'), ret) @@ -470,20 +1785,21 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase): class TestIsX86(helpers.CiTestCase): - def test_is_x86_matches_x86_types(self): """is_x86 returns True if CPU architecture matches.""" matched_arches = ['x86_64', 'i386', 
'i586', 'i686']
 for arch in matched_arches:
 self.assertTrue(
- util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch)
+ util.is_x86(arch), 'Expected is_x86 for arch "%s"' % arch
+ )
 def test_is_x86_unmatched_types(self):
 """is_x86 returns False on non-intel x86 architectures."""
 unmatched_arches = ['ia64', '9000/800', 'arm64v71']
 for arch in unmatched_arches:
 self.assertFalse(
- util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch)
+ util.is_x86(arch), 'Expected not is_x86 for arch "%s"' % arch
+ )
 @mock.patch('cloudinit.util.os.uname')
 def test_is_x86_calls_uname_for_architecture(self, m_uname):
@@ -493,7 +1809,6 @@ class TestIsX86(helpers.CiTestCase):
 class TestGetConfigLogfiles(helpers.CiTestCase):
-
 def test_empty_cfg_returns_empty_list(self):
 """An empty config passed to get_config_logfiles returns an empty list."""
 self.assertEqual([], util.get_config_logfiles(None))
@@ -502,36 +1817,53 @@ def test_default_log_file_present(self):
 """When default_log_file is set get_config_logfiles finds it."""
 self.assertEqual(
- ['/my.log'],
- util.get_config_logfiles({'def_log_file': '/my.log'}))
+ ['/my.log'], util.get_config_logfiles({'def_log_file': '/my.log'})
+ )
 def test_output_logs_parsed_when_teeing_files(self):
 """Output configuration is parsed when teeing files."""
 self.assertEqual(
 ['/himom.log', '/my.log'],
- sorted(util.get_config_logfiles({
- 'def_log_file': '/my.log',
- 'output': {'all': '|tee -a /himom.log'}})))
+ sorted(
+ util.get_config_logfiles(
+ {
+ 'def_log_file': '/my.log',
+ 'output': {'all': '|tee -a /himom.log'},
+ }
+ )
+ ),
+ )
 def test_output_logs_parsed_when_redirecting(self):
 """Output configuration is parsed when redirecting to a file."""
 self.assertEqual(
 ['/my.log', '/test.log'],
- sorted(util.get_config_logfiles({
- 'def_log_file': '/my.log',
- 'output': {'all': '>/test.log'}})))
+ sorted(
+ util.get_config_logfiles(
+ {
+ 'def_log_file': '/my.log',
+ 'output': {'all': '>/test.log'},
+ }
+ )
+ ),
+ )
 def test_output_logs_parsed_when_appending(self):
 """Output configuration is parsed when appending to a file."""
 self.assertEqual(
 ['/my.log', '/test.log'],
- sorted(util.get_config_logfiles({
- 'def_log_file': '/my.log',
- 'output': {'all': '>> /test.log'}})))
+ sorted(
+ util.get_config_logfiles(
+ {
+ 'def_log_file': '/my.log',
+ 'output': {'all': '>> /test.log'},
+ }
+ )
+ ),
+ )
 class TestMultiLog(helpers.FilesystemMockingTestCase):
-
 def _createConsole(self, root):
 os.mkdir(os.path.join(root, 'dev'))
 open(os.path.join(root, 'dev', 'console'), 'a').close()
@@ -580,8 +1912,9 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
 log = mock.MagicMock()
 logged_string = 'something very important'
 util.multi_log(logged_string, log=log)
- self.assertEqual([((mock.ANY, logged_string), {})],
- log.log.call_args_list)
+ self.assertEqual(
+ [((mock.ANY, logged_string), {})], log.log.call_args_list
+ )
 def test_newlines_stripped_from_log_call(self):
 log = mock.MagicMock()
@@ -602,7 +1935,6 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
 class TestMessageFromString(helpers.TestCase):
-
 def test_unicode_not_messed_up(self):
 roundtripped = util.message_from_string('\n').as_string()
 self.assertNotIn('\x00', roundtripped)
@@ -618,8 +1950,9 @@ class TestReadSeeded(helpers.TestCase):
 ud = b"userdatablob"
 vd = b"vendordatablob"
 helpers.populate_dir(
- self.tmp, {'meta-data': "key1: val1", 'user-data': ud,
- 'vendor-data': vd})
+ self.tmp,
+ {'meta-data': "key1: val1", 'user-data': ud,
'vendor-data': vd}, + ) sdir = self.tmp + os.path.sep (found_md, found_ud, found_vd) = util.read_seeded(sdir) @@ -638,7 +1971,8 @@ class TestReadSeededWithoutVendorData(helpers.TestCase): ud = b"userdatablob" vd = None helpers.populate_dir( - self.tmp, {'meta-data': "key1: val1", 'user-data': ud}) + self.tmp, {'meta-data': "key1: val1", 'user-data': ud} + ) sdir = self.tmp + os.path.sep (found_md, found_ud, found_vd) = util.read_seeded(sdir) @@ -649,6 +1983,7 @@ class TestReadSeededWithoutVendorData(helpers.TestCase): class TestEncode(helpers.TestCase): """Test the encoding functions""" + def test_decode_binary_plain_text_with_hex(self): blob = 'BOOTABLE_FLAG=\x80init=/bin/systemd' text = util.decode_binary(blob) @@ -657,12 +1992,14 @@ class TestEncode(helpers.TestCase): class TestProcessExecutionError(helpers.TestCase): - template = ('{description}\n' - 'Command: {cmd}\n' - 'Exit code: {exit_code}\n' - 'Reason: {reason}\n' - 'Stdout: {stdout}\n' - 'Stderr: {stderr}') + template = ( + '{description}\n' + 'Command: {cmd}\n' + 'Exit code: {exit_code}\n' + 'Reason: {reason}\n' + 'Stdout: {stdout}\n' + 'Stderr: {stderr}' + ) empty_attr = '-' empty_description = 'Unexpected error while running command.' @@ -671,23 +2008,37 @@ class TestProcessExecutionError(helpers.TestCase): msg = 'abc\ndef' formatted = 'abc\n{0}def'.format(' ' * 4) self.assertEqual(error._indent_text(msg, indent_level=4), formatted) - self.assertEqual(error._indent_text(msg.encode(), indent_level=4), - formatted.encode()) + self.assertEqual( + error._indent_text(msg.encode(), indent_level=4), + formatted.encode(), + ) self.assertIsInstance( - error._indent_text(msg.encode()), type(msg.encode())) + error._indent_text(msg.encode()), type(msg.encode()) + ) def test_pexec_error_type(self): self.assertIsInstance(subp.ProcessExecutionError(), IOError) def test_pexec_error_empty_msgs(self): error = subp.ProcessExecutionError() - self.assertTrue(all(attr == self.empty_attr for attr in - (error.stderr, error.stdout, error.reason))) + self.assertTrue( + all( + attr == self.empty_attr + for attr in (error.stderr, error.stdout, error.reason) + ) + ) self.assertEqual(error.description, self.empty_description) - self.assertEqual(str(error), self.template.format( - description=self.empty_description, exit_code=self.empty_attr, - reason=self.empty_attr, stdout=self.empty_attr, - stderr=self.empty_attr, cmd=self.empty_attr)) + self.assertEqual( + str(error), + self.template.format( + description=self.empty_description, + exit_code=self.empty_attr, + reason=self.empty_attr, + stdout=self.empty_attr, + stderr=self.empty_attr, + cmd=self.empty_attr, + ), + ) def test_pexec_error_single_line_msgs(self): stdout_msg = 'out out' @@ -695,33 +2046,46 @@ class TestProcessExecutionError(helpers.TestCase): cmd = 'test command' exit_code = 3 error = subp.ProcessExecutionError( - stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd) - self.assertEqual(str(error), self.template.format( - description=self.empty_description, stdout=stdout_msg, - stderr=stderr_msg, exit_code=str(exit_code), - reason=self.empty_attr, cmd=cmd)) + stdout=stdout_msg, stderr=stderr_msg, exit_code=3, cmd=cmd + ) + self.assertEqual( + str(error), + self.template.format( + description=self.empty_description, + stdout=stdout_msg, + stderr=stderr_msg, + exit_code=str(exit_code), + reason=self.empty_attr, + cmd=cmd, + ), + ) def test_pexec_error_multi_line_msgs(self): # make sure bytes is converted handled properly when formatting stdout_msg = 'multi\nline\noutput message'.encode() 
stderr_msg = 'multi\nline\nerror message\n\n\n' error = subp.ProcessExecutionError( - stdout=stdout_msg, stderr=stderr_msg) + stdout=stdout_msg, stderr=stderr_msg + ) self.assertEqual( str(error), - '\n'.join(( - '{description}', - 'Command: {empty_attr}', - 'Exit code: {empty_attr}', - 'Reason: {empty_attr}', - 'Stdout: multi', - ' line', - ' output message', - 'Stderr: multi', - ' line', - ' error message', - )).format(description=self.empty_description, - empty_attr=self.empty_attr)) + '\n'.join( + ( + '{description}', + 'Command: {empty_attr}', + 'Exit code: {empty_attr}', + 'Reason: {empty_attr}', + 'Stdout: multi', + ' line', + ' output message', + 'Stderr: multi', + ' line', + ' error message', + ) + ).format( + description=self.empty_description, empty_attr=self.empty_attr + ), + ) class TestSystemIsSnappy(helpers.FilesystemMockingTestCase): @@ -758,7 +2122,8 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase): "BOOT_IMAGE=(loop)/kernel.img root=LABEL=writable " "snap_core=core_x1.snap snap_kernel=pc-kernel_x1.snap ro " "net.ifnames=0 init=/lib/systemd/systemd console=tty1 " - "console=ttyS0 panic=-1") + "console=ttyS0 panic=-1" + ) m_cmdline.return_value = cmdline self.assertTrue(util.system_is_snappy()) self.assertTrue(m_cmdline.call_count > 0) @@ -777,8 +2142,7 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase): m_cmdline.return_value = 'root=/dev/sda' root_d = self.tmp_dir() content = '\n'.join(["[Foo]", "source = 'ubuntu-core'", ""]) - helpers.populate_dir( - root_d, {'etc/system-image/channel.ini': content}) + helpers.populate_dir(root_d, {'etc/system-image/channel.ini': content}) self.reRoot(root_d) self.assertTrue(util.system_is_snappy()) @@ -788,7 +2152,8 @@ class TestSystemIsSnappy(helpers.FilesystemMockingTestCase): m_cmdline.return_value = 'root=/dev/sda' root_d = self.tmp_dir() helpers.populate_dir( - root_d, {'etc/system-image/config.d/my.file': "_unused"}) + root_d, {'etc/system-image/config.d/my.file': "_unused"} + ) self.reRoot(root_d) self.assertTrue(util.system_is_snappy()) @@ -798,18 +2163,24 @@ class TestLoadShellContent(helpers.TestCase): """Shell comments should be allowed in the content.""" self.assertEqual( {'key1': 'val1', 'key2': 'val2', 'key3': 'val3 #tricky'}, - util.load_shell_content('\n'.join([ - "#top of file comment", - "key1=val1 #this is a comment", - "# second comment", - 'key2="val2" # inlin comment' - '#badkey=wark', - 'key3="val3 #tricky"', - '']))) + util.load_shell_content( + '\n'.join( + [ + "#top of file comment", + "key1=val1 #this is a comment", + "# second comment", + 'key2="val2" # inlin comment#badkey=wark', + 'key3="val3 #tricky"', + '', + ] + ) + ), + ) class TestGetProcEnv(helpers.TestCase): """test get_proc_env.""" + null = b'\x00' simple1 = b'HOME=/' simple2 = b'PATH=/bin:/sbin' @@ -824,14 +2195,19 @@ class TestGetProcEnv(helpers.TestCase): def test_non_utf8_in_environment(self, m_load_file): """env may have non utf-8 decodable content.""" content = self.null.join( - (self.bootflag, self.simple1, self.simple2, self.mixed)) + (self.bootflag, self.simple1, self.simple2, self.mixed) + ) m_load_file.return_value = content self.assertEqual( - {'BOOTABLE_FLAG': self._val_decoded(self.bootflag), - 'HOME': '/', 'PATH': '/bin:/sbin', - 'MIXED': self._val_decoded(self.mixed)}, - util.get_proc_env(1)) + { + 'BOOTABLE_FLAG': self._val_decoded(self.bootflag), + 'HOME': '/', + 'PATH': '/bin:/sbin', + 'MIXED': self._val_decoded(self.mixed), + }, + util.get_proc_env(1), + ) self.assertEqual(1, m_load_file.call_count) 
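# Editor's aside (illustrative sketch, not part of the patch): the byte
# blobs fed to util.get_proc_env above mimic the kernel's
# /proc/<pid>/environ format -- NUL-separated KEY=VALUE byte pairs. A
# standalone sketch of that parsing, assuming a Linux procfs;
# parse_environ is a hypothetical helper name, not cloud-init API, and
# replacing undecodable bytes is one reasonable policy for such data.
def parse_environ(raw: bytes, encoding: str = "utf-8") -> dict:
    env = {}
    for tok in raw.split(b"\x00"):
        if b"=" not in tok:
            continue  # skip the empty trailing token and malformed entries
        key, _, val = tok.partition(b"=")
        # replace undecodable bytes instead of raising on non-utf8 values
        env[key.decode(encoding, "replace")] = val.decode(encoding, "replace")
    return env

with open("/proc/self/environ", "rb") as f:  # Linux-only path
    print(parse_environ(f.read()).get("PATH"))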
@mock.patch("cloudinit.util.load_file") @@ -843,7 +2219,8 @@ class TestGetProcEnv(helpers.TestCase): self.assertEqual( dict([t.split(b'=') for t in lines]), - util.get_proc_env(1, encoding=None)) + util.get_proc_env(1, encoding=None), + ) self.assertEqual(1, m_load_file.call_count) @mock.patch("cloudinit.util.load_file") @@ -852,8 +2229,8 @@ class TestGetProcEnv(helpers.TestCase): content = self.null.join((self.simple1, self.simple2)) m_load_file.return_value = content self.assertEqual( - {'HOME': '/', 'PATH': '/bin:/sbin'}, - util.get_proc_env(1)) + {'HOME': '/', 'PATH': '/bin:/sbin'}, util.get_proc_env(1) + ) self.assertEqual(1, m_load_file.call_count) @mock.patch("cloudinit.util.load_file") @@ -871,14 +2248,15 @@ class TestGetProcEnv(helpers.TestCase): self.assertEqual(my_ppid, util.get_proc_ppid(my_pid)) -class TestKernelVersion(): +class TestKernelVersion: """test kernel version function""" params = [ ('5.6.19-300.fc32.x86_64', (5, 6)), ('4.15.0-101-generic', (4, 15)), ('3.10.0-1062.12.1.vz7.131.10', (3, 10)), - ('4.18.0-144.el8.x86_64', (4, 18))] + ('4.18.0-144.el8.x86_64', (4, 18)), + ] @mock.patch('os.uname') @pytest.mark.parametrize("uname_release,expected", params) @@ -892,29 +2270,27 @@ class TestFindDevs: def test_find_devs_with(self, m_subp): m_subp.return_value = ( '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"', - '' + '', ) devlist = util.find_devs_with() assert devlist == [ - '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'] + '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"' + ] devlist = util.find_devs_with("LABEL_FATBOOT=A_LABEL") assert devlist == [ - '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"'] + '/dev/sda1: UUID="some-uuid" TYPE="ext4" PARTUUID="some-partid"' + ] @mock.patch('cloudinit.subp.subp') def test_find_devs_with_openbsd(self, m_subp): - m_subp.return_value = ( - 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '' - ) + m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '') devlist = util.find_devs_with_openbsd() assert devlist == ['/dev/cd0a', '/dev/sd1i'] @mock.patch('cloudinit.subp.subp') def test_find_devs_with_openbsd_with_criteria(self, m_subp): - m_subp.return_value = ( - 'cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '' - ) + m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '') devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660") assert devlist == ['/dev/cd0a'] @@ -923,7 +2299,8 @@ class TestFindDevs: assert devlist == ['/dev/cd0a', '/dev/sd1i'] @pytest.mark.parametrize( - 'criteria,expected_devlist', ( + 'criteria,expected_devlist', + ( (None, ['/dev/msdosfs/EFISYS', '/dev/iso9660/config-2']), ('TYPE=iso9660', ['/dev/iso9660/config-2']), ('TYPE=vfat', ['/dev/msdosfs/EFISYS']), @@ -940,19 +2317,23 @@ class TestFindDevs: elif pattern == "/dev/iso9660/*": return iso9660 raise Exception + m_glob.side_effect = fake_glob devlist = util.find_devs_with_freebsd(criteria=criteria) assert devlist == expected_devlist @pytest.mark.parametrize( - 'criteria,expected_devlist', ( + 'criteria,expected_devlist', + ( (None, ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']), ('TYPE=iso9660', ['/dev/cd0']), ('TYPE=vfat', ["/dev/ld0", "/dev/dk0", "/dev/dk1"]), - ('LABEL_FATBOOT=A_LABEL', # lp: #1841466 - ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0']), - ) + ( + 'LABEL_FATBOOT=A_LABEL', # lp: #1841466 + ['/dev/ld0', '/dev/dk0', '/dev/dk1', '/dev/cd0'], + ), + ), ) @mock.patch("cloudinit.subp.subp") def test_find_devs_with_netbsd(self, m_subp, criteria, expected_devlist): @@ -1000,21 
+2381,24 @@ class TestFindDevs: assert devlist == expected_devlist @pytest.mark.parametrize( - 'criteria,expected_devlist', ( + 'criteria,expected_devlist', + ( (None, ['/dev/vbd0', '/dev/cd0', '/dev/acd0']), ('TYPE=iso9660', ['/dev/cd0', '/dev/acd0']), ('TYPE=vfat', ['/dev/vbd0']), - ('LABEL_FATBOOT=A_LABEL', # lp: #1841466 - ['/dev/vbd0', '/dev/cd0', '/dev/acd0']), - ) + ( + 'LABEL_FATBOOT=A_LABEL', # lp: #1841466 + ['/dev/vbd0', '/dev/cd0', '/dev/acd0'], + ), + ), ) @mock.patch("cloudinit.subp.subp") - def test_find_devs_with_dragonflybsd(self, m_subp, criteria, - expected_devlist): - m_subp.return_value = ( - 'md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', '' - ) + def test_find_devs_with_dragonflybsd( + self, m_subp, criteria, expected_devlist + ): + m_subp.return_value = ('md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', '') devlist = util.find_devs_with_dragonflybsd(criteria=criteria) assert devlist == expected_devlist + # vi: ts=4 expandtab diff --git a/tests/unittests/test_version.py b/tests/unittests/test_version.py new file mode 100644 index 00000000..ed66b09f --- /dev/null +++ b/tests/unittests/test_version.py @@ -0,0 +1,31 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from unittest import mock + +from tests.unittests.helpers import CiTestCase +from cloudinit import version + + +class TestExportsFeatures(CiTestCase): + def test_has_network_config_v1(self): + self.assertIn('NETWORK_CONFIG_V1', version.FEATURES) + + def test_has_network_config_v2(self): + self.assertIn('NETWORK_CONFIG_V2', version.FEATURES) + + +class TestVersionString(CiTestCase): + @mock.patch("cloudinit.version._PACKAGED_VERSION", + "17.2-3-gb05b9972-0ubuntu1") + def test_package_version_respected(self): + """If _PACKAGED_VERSION is filled in, then it should be returned.""" + self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string()) + + @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@") + @mock.patch("cloudinit.version.__VERSION__", "17.2") + def test_package_version_skipped(self): + """If _PACKAGED_VERSION is not modified, then return __VERSION__.""" + self.assertEqual("17.2", version.version_string()) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware/__init__.py b/tests/unittests/test_vmware/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/unittests/test_vmware/test_custom_script.py b/tests/unittests/test_vmware/test_custom_script.py deleted file mode 100644 index f89f8157..00000000 --- a/tests/unittests/test_vmware/test_custom_script.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (C) 2015 Canonical Ltd. -# Copyright (C) 2017-2019 VMware INC. -# -# Author: Maitreyee Saikia -# -# This file is part of cloud-init. See LICENSE file for license information. - -import os -import stat -from cloudinit import util -from cloudinit.sources.helpers.vmware.imc.config_custom_script import ( - CustomScriptConstant, - CustomScriptNotFound, - PreCustomScript, - PostCustomScript, -) -from cloudinit.tests.helpers import CiTestCase, mock - - -class TestVmwareCustomScript(CiTestCase): - def setUp(self): - self.tmpDir = self.tmp_dir() - # Mock the tmpDir as the root dir in VM. - self.execDir = os.path.join(self.tmpDir, ".customization") - self.execScript = os.path.join(self.execDir, - ".customize.sh") - - def test_prepare_custom_script(self): - """ - This test is designed to verify the behavior based on the presence of - custom script. 
Mainly needed for scenario where a custom script is - expected, but was not properly copied. "CustomScriptNotFound" exception - is raised in such cases. - """ - # Custom script does not exist. - preCust = PreCustomScript("random-vmw-test", self.tmpDir) - self.assertEqual("random-vmw-test", preCust.scriptname) - self.assertEqual(self.tmpDir, preCust.directory) - self.assertEqual(self.tmp_path("random-vmw-test", self.tmpDir), - preCust.scriptpath) - with self.assertRaises(CustomScriptNotFound): - preCust.prepare_script() - - # Custom script exists. - custScript = self.tmp_path("test-cust", self.tmpDir) - util.write_file(custScript, "test-CR-strip\r\r") - with mock.patch.object(CustomScriptConstant, - "CUSTOM_TMP_DIR", - self.execDir): - with mock.patch.object(CustomScriptConstant, - "CUSTOM_SCRIPT", - self.execScript): - postCust = PostCustomScript("test-cust", - self.tmpDir, - self.tmpDir) - self.assertEqual("test-cust", postCust.scriptname) - self.assertEqual(self.tmpDir, postCust.directory) - self.assertEqual(custScript, postCust.scriptpath) - postCust.prepare_script() - - # Custom script is copied with exec privilege - self.assertTrue(os.path.exists(self.execScript)) - st = os.stat(self.execScript) - self.assertTrue(st.st_mode & stat.S_IEXEC) - with open(self.execScript, "r") as f: - content = f.read() - self.assertEqual(content, "test-CR-strip") - # Check if all carraige returns are stripped from script. - self.assertFalse("\r" in content) - - def test_execute_post_cust(self): - """ - This test is designed to verify the behavior after execute post - customization. - """ - # Prepare the customize package - postCustRun = self.tmp_path("post-customize-guest.sh", self.tmpDir) - util.write_file(postCustRun, "This is the script to run post cust") - userScript = self.tmp_path("test-cust", self.tmpDir) - util.write_file(userScript, "This is the post cust script") - - # Mock the cc_scripts_per_instance dir and marker file. - # Create another tmp dir for cc_scripts_per_instance. - ccScriptDir = self.tmp_dir() - ccScript = os.path.join(ccScriptDir, "post-customize-guest.sh") - markerFile = os.path.join(self.tmpDir, ".markerFile") - with mock.patch.object(CustomScriptConstant, - "CUSTOM_TMP_DIR", - self.execDir): - with mock.patch.object(CustomScriptConstant, - "CUSTOM_SCRIPT", - self.execScript): - with mock.patch.object(CustomScriptConstant, - "POST_CUSTOM_PENDING_MARKER", - markerFile): - postCust = PostCustomScript("test-cust", - self.tmpDir, - ccScriptDir) - postCust.execute() - # Check cc_scripts_per_instance and marker file - # are created. - self.assertTrue(os.path.exists(ccScript)) - with open(ccScript, "r") as f: - content = f.read() - self.assertEqual(content, - "This is the script to run post cust") - self.assertTrue(os.path.exists(markerFile)) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware/test_guestcust_util.py b/tests/unittests/test_vmware/test_guestcust_util.py deleted file mode 100644 index c8b59d83..00000000 --- a/tests/unittests/test_vmware/test_guestcust_util.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (C) 2019 Canonical Ltd. -# Copyright (C) 2019 VMware INC. -# -# Author: Xiaofeng Wang -# -# This file is part of cloud-init. See LICENSE file for license information. 
- -from cloudinit import subp -from cloudinit.sources.helpers.vmware.imc.config import Config -from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile -from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( - get_tools_config, - set_gc_status, -) -from cloudinit.tests.helpers import CiTestCase, mock - - -class TestGuestCustUtil(CiTestCase): - def test_get_tools_config_not_installed(self): - """ - This test is designed to verify the behavior if vmware-toolbox-cmd - is not installed. - """ - with mock.patch.object(subp, 'which', return_value=None): - self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), 'defaultVal') - - def test_get_tools_config_internal_exception(self): - """ - This test is designed to verify the behavior if internal exception - is raised. - """ - with mock.patch.object(subp, 'which', return_value='/dummy/path'): - with mock.patch.object(subp, 'subp', - return_value=('key=value', b''), - side_effect=subp.ProcessExecutionError( - "subp failed", exit_code=99)): - # verify return value is 'defaultVal', not 'value'. - self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - 'defaultVal') - - def test_get_tools_config_normal(self): - """ - This test is designed to verify the value could be parsed from - key = value of the given [section] - """ - with mock.patch.object(subp, 'which', return_value='/dummy/path'): - # value is not blank - with mock.patch.object(subp, 'subp', - return_value=('key = value ', b'')): - self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - 'value') - # value is blank - with mock.patch.object(subp, 'subp', - return_value=('key = ', b'')): - self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - '') - # value contains = - with mock.patch.object(subp, 'subp', - return_value=('key=Bar=Wark', b'')): - self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - 'Bar=Wark') - - # value contains specific characters - with mock.patch.object(subp, 'subp', - return_value=('[a] b.c_d=e-f', b'')): - self.assertEqual( - get_tools_config('section', 'key', 'defaultVal'), - 'e-f') - - def test_set_gc_status(self): - """ - This test is designed to verify the behavior of set_gc_status - """ - # config is None, return None - self.assertEqual(set_gc_status(None, 'Successful'), None) - - # post gc status is NO, return None - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - conf = Config(cf) - self.assertEqual(set_gc_status(conf, 'Successful'), None) - - # post gc status is YES, subp is called to execute command - cf._insertKey("MISC|POST-GC-STATUS", "YES") - conf = Config(cf) - with mock.patch.object(subp, 'subp', - return_value=('ok', b'')) as mockobj: - self.assertEqual( - set_gc_status(conf, 'Successful'), ('ok', b'')) - mockobj.assert_called_once_with( - ['vmware-rpctool', 'info-set guestinfo.gc.status Successful'], - rcs=[0]) - -# vi: ts=4 expandtab diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py deleted file mode 100644 index 430cc69f..00000000 --- a/tests/unittests/test_vmware_config_file.py +++ /dev/null @@ -1,545 +0,0 @@ -# Copyright (C) 2015 Canonical Ltd. -# Copyright (C) 2016 VMware INC. -# -# Author: Sankar Tanguturi -# Pengpeng Sun -# -# This file is part of cloud-init. See LICENSE file for license information. 
- -import logging -import os -import sys -import tempfile -import textwrap - -from cloudinit.sources.DataSourceOVF import get_network_config_from_conf -from cloudinit.sources.DataSourceOVF import read_vmware_imc -from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum -from cloudinit.sources.helpers.vmware.imc.config import Config -from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile -from cloudinit.sources.helpers.vmware.imc.config_nic import gen_subnet -from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator -from cloudinit.tests.helpers import CiTestCase - -logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) -logger = logging.getLogger(__name__) - - -class TestVmwareConfigFile(CiTestCase): - - def test_utility_methods(self): - """Tests basic utility methods of ConfigFile class""" - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - - cf.clear() - - self.assertEqual(0, len(cf), "clear size") - - cf._insertKey(" PASSWORD|-PASS ", " foo ") - cf._insertKey("BAR", " ") - - self.assertEqual(2, len(cf), "insert size") - self.assertEqual('foo', cf["PASSWORD|-PASS"], "password") - self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword") - self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"), - "keepPassword") - self.assertFalse(cf.should_remove_current_value("PASSWORD|-PASS"), - "removePassword") - self.assertFalse("FOO" in cf, "hasFoo") - self.assertTrue(cf.should_keep_current_value("FOO"), "keepFoo") - self.assertFalse(cf.should_remove_current_value("FOO"), "removeFoo") - self.assertTrue("BAR" in cf, "hasBar") - self.assertFalse(cf.should_keep_current_value("BAR"), "keepBar") - self.assertTrue(cf.should_remove_current_value("BAR"), "removeBar") - - def test_datasource_instance_id(self): - """Tests instance id for the DatasourceOVF""" - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - - instance_id_prefix = 'iid-vmware-' - - conf = Config(cf) - - (md1, _, _) = read_vmware_imc(conf) - self.assertIn(instance_id_prefix, md1["instance-id"]) - self.assertEqual(md1["instance-id"], 'iid-vmware-imc') - - (md2, _, _) = read_vmware_imc(conf) - self.assertIn(instance_id_prefix, md2["instance-id"]) - self.assertEqual(md2["instance-id"], 'iid-vmware-imc') - - self.assertEqual(md2["instance-id"], md1["instance-id"]) - - def test_configfile_static_2nics(self): - """Tests Config class for a configuration with two static NICs.""" - cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") - - conf = Config(cf) - - self.assertEqual('myhost1', conf.host_name, "hostName") - self.assertEqual('Africa/Abidjan', conf.timezone, "tz") - self.assertTrue(conf.utc, "utc") - - self.assertEqual(['10.20.145.1', '10.20.145.2'], - conf.name_servers, - "dns") - self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], - conf.dns_suffixes, - "suffixes") - - nics = conf.nics - ipv40 = nics[0].staticIpv4 - - self.assertEqual(2, len(nics), "nics") - self.assertEqual('NIC1', nics[0].name, "nic0") - self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") - self.assertEqual(BootProtoEnum.STATIC, nics[0].bootProto, "bootproto0") - self.assertEqual('10.20.87.154', ipv40[0].ip, "ipv4Addr0") - self.assertEqual('255.255.252.0', ipv40[0].netmask, "ipv4Mask0") - self.assertEqual(2, len(ipv40[0].gateways), "ipv4Gw0") - self.assertEqual('10.20.87.253', ipv40[0].gateways[0], "ipv4Gw0_0") - self.assertEqual('10.20.87.105', ipv40[0].gateways[1], "ipv4Gw0_1") - - self.assertEqual(1, len(nics[0].staticIpv6), "ipv6Cnt0") - 
self.assertEqual('fc00:10:20:87::154', - nics[0].staticIpv6[0].ip, - "ipv6Addr0") - - self.assertEqual('NIC2', nics[1].name, "nic1") - self.assertTrue(not nics[1].staticIpv6, "ipv61 dhcp") - - def test_config_file_dhcp_2nics(self): - """Tests Config class for a configuration with two DHCP NICs.""" - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - - conf = Config(cf) - nics = conf.nics - self.assertEqual(2, len(nics), "nics") - self.assertEqual('NIC1', nics[0].name, "nic0") - self.assertEqual('00:50:56:a6:8c:08', nics[0].mac, "mac0") - self.assertEqual(BootProtoEnum.DHCP, nics[0].bootProto, "bootproto0") - - def test_config_password(self): - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - - cf._insertKey("PASSWORD|-PASS", "test-password") - cf._insertKey("PASSWORD|RESET", "no") - - conf = Config(cf) - self.assertEqual('test-password', conf.admin_password, "password") - self.assertFalse(conf.reset_password, "do not reset password") - - def test_config_reset_passwd(self): - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - - cf._insertKey("PASSWORD|-PASS", "test-password") - cf._insertKey("PASSWORD|RESET", "random") - - conf = Config(cf) - with self.assertRaises(ValueError): - pw = conf.reset_password - self.assertIsNone(pw) - - cf.clear() - cf._insertKey("PASSWORD|RESET", "yes") - self.assertEqual(1, len(cf), "insert size") - - conf = Config(cf) - self.assertTrue(conf.reset_password, "reset password") - - def test_get_config_nameservers(self): - """Tests DNS and nameserver settings in a configuration.""" - cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") - - config = Config(cf) - - network_config = get_network_config_from_conf(config, False) - - self.assertEqual(1, network_config.get('version')) - - config_types = network_config.get('config') - name_servers = None - dns_suffixes = None - - for type in config_types: - if type.get('type') == 'nameserver': - name_servers = type.get('address') - dns_suffixes = type.get('search') - break - - self.assertEqual(['10.20.145.1', '10.20.145.2'], - name_servers, - "dns") - self.assertEqual(['eng.vmware.com', 'proxy.vmware.com'], - dns_suffixes, - "suffixes") - - def test_gen_subnet(self): - """Tests if gen_subnet properly calculates network subnet from - IPv4 address and netmask""" - ip_subnet_list = [['10.20.87.253', '255.255.252.0', '10.20.84.0'], - ['10.20.92.105', '255.255.252.0', '10.20.92.0'], - ['192.168.0.10', '255.255.0.0', '192.168.0.0']] - for entry in ip_subnet_list: - self.assertEqual(entry[2], gen_subnet(entry[0], entry[1]), - "Subnet for a specified ip and netmask") - - def test_get_config_dns_suffixes(self): - """Tests if get_network_config_from_conf properly - generates nameservers and dns settings from a - specified configuration""" - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - - config = Config(cf) - - network_config = get_network_config_from_conf(config, False) - - self.assertEqual(1, network_config.get('version')) - - config_types = network_config.get('config') - name_servers = None - dns_suffixes = None - - for type in config_types: - if type.get('type') == 'nameserver': - name_servers = type.get('address') - dns_suffixes = type.get('search') - break - - self.assertEqual([], - name_servers, - "dns") - self.assertEqual(['eng.vmware.com'], - dns_suffixes, - "suffixes") - - def test_get_nics_list_dhcp(self): - """Tests if NicConfigurator properly calculates network subnets - for a configuration with a list of DHCP NICs""" - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - - 
config = Config(cf) - - nicConfigurator = NicConfigurator(config.nics, False) - nics_cfg_list = nicConfigurator.generate() - - self.assertEqual(2, len(nics_cfg_list), "number of config elements") - - nic1 = {'name': 'NIC1'} - nic2 = {'name': 'NIC2'} - for cfg in nics_cfg_list: - if cfg.get('name') == nic1.get('name'): - nic1.update(cfg) - elif cfg.get('name') == nic2.get('name'): - nic2.update(cfg) - - self.assertEqual('physical', nic1.get('type'), 'type of NIC1') - self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1') - self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'), - 'mac address of NIC1') - subnets = nic1.get('subnets') - self.assertEqual(1, len(subnets), 'number of subnets for NIC1') - subnet = subnets[0] - self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC1') - self.assertEqual('auto', subnet.get('control'), 'NIC1 Control type') - - self.assertEqual('physical', nic2.get('type'), 'type of NIC2') - self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2') - self.assertEqual('00:50:56:a6:5a:de', nic2.get('mac_address'), - 'mac address of NIC2') - subnets = nic2.get('subnets') - self.assertEqual(1, len(subnets), 'number of subnets for NIC2') - subnet = subnets[0] - self.assertEqual('dhcp', subnet.get('type'), 'DHCP type for NIC2') - self.assertEqual('auto', subnet.get('control'), 'NIC2 Control type') - - def test_get_nics_list_static(self): - """Tests if NicConfigurator properly calculates network subnets - for a configuration with 2 static NICs""" - cf = ConfigFile("tests/data/vmware/cust-static-2nic.cfg") - - config = Config(cf) - - nicConfigurator = NicConfigurator(config.nics, False) - nics_cfg_list = nicConfigurator.generate() - - self.assertEqual(2, len(nics_cfg_list), "number of elements") - - nic1 = {'name': 'NIC1'} - nic2 = {'name': 'NIC2'} - route_list = [] - for cfg in nics_cfg_list: - cfg_type = cfg.get('type') - if cfg_type == 'physical': - if cfg.get('name') == nic1.get('name'): - nic1.update(cfg) - elif cfg.get('name') == nic2.get('name'): - nic2.update(cfg) - - self.assertEqual('physical', nic1.get('type'), 'type of NIC1') - self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1') - self.assertEqual('00:50:56:a6:8c:08', nic1.get('mac_address'), - 'mac address of NIC1') - - subnets = nic1.get('subnets') - self.assertEqual(2, len(subnets), 'Number of subnets') - - static_subnet = [] - static6_subnet = [] - - for subnet in subnets: - subnet_type = subnet.get('type') - if subnet_type == 'static': - static_subnet.append(subnet) - elif subnet_type == 'static6': - static6_subnet.append(subnet) - else: - self.assertEqual(True, False, 'Unknown type') - if 'route' in subnet: - for route in subnet.get('routes'): - route_list.append(route) - - self.assertEqual(1, len(static_subnet), 'Number of static subnet') - self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet') - - subnet = static_subnet[0] - self.assertEqual('10.20.87.154', subnet.get('address'), - 'IPv4 address of static subnet') - self.assertEqual('255.255.252.0', subnet.get('netmask'), - 'NetMask of static subnet') - self.assertEqual('auto', subnet.get('control'), - 'control for static subnet') - - subnet = static6_subnet[0] - self.assertEqual('fc00:10:20:87::154', subnet.get('address'), - 'IPv6 address of static subnet') - self.assertEqual('64', subnet.get('netmask'), - 'NetMask of static6 subnet') - - route_set = set(['10.20.87.253', '10.20.87.105', '192.168.0.10']) - for route in route_list: - self.assertEqual(10000, route.get('metric'), 'metric of route') - gateway = 
route.get('gateway') - if gateway in route_set: - route_set.discard(gateway) - else: - self.assertEqual(True, False, 'invalid gateway %s' % (gateway)) - - self.assertEqual('physical', nic2.get('type'), 'type of NIC2') - self.assertEqual('NIC2', nic2.get('name'), 'name of NIC2') - self.assertEqual('00:50:56:a6:ef:7d', nic2.get('mac_address'), - 'mac address of NIC2') - - subnets = nic2.get('subnets') - self.assertEqual(1, len(subnets), 'Number of subnets for NIC2') - - subnet = subnets[0] - self.assertEqual('static', subnet.get('type'), 'Subnet type') - self.assertEqual('192.168.6.102', subnet.get('address'), - 'Subnet address') - self.assertEqual('255.255.0.0', subnet.get('netmask'), - 'Subnet netmask') - - def test_custom_script(self): - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - conf = Config(cf) - self.assertIsNone(conf.custom_script_name) - cf._insertKey("CUSTOM-SCRIPT|SCRIPT-NAME", "test-script") - conf = Config(cf) - self.assertEqual("test-script", conf.custom_script_name) - - def test_post_gc_status(self): - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - conf = Config(cf) - self.assertFalse(conf.post_gc_status) - cf._insertKey("MISC|POST-GC-STATUS", "YES") - conf = Config(cf) - self.assertTrue(conf.post_gc_status) - - def test_no_default_run_post_script(self): - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - conf = Config(cf) - self.assertFalse(conf.default_run_post_script) - cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "NO") - conf = Config(cf) - self.assertFalse(conf.default_run_post_script) - - def test_yes_default_run_post_script(self): - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - cf._insertKey("MISC|DEFAULT-RUN-POST-CUST-SCRIPT", "yes") - conf = Config(cf) - self.assertTrue(conf.default_run_post_script) - - -class TestVmwareNetConfig(CiTestCase): - """Test conversion of vmware config to cloud-init config.""" - - maxDiff = None - - def _get_NicConfigurator(self, text): - fp = None - try: - with tempfile.NamedTemporaryFile(mode="w", dir=self.tmp_dir(), - delete=False) as fp: - fp.write(text) - fp.close() - cfg = Config(ConfigFile(fp.name)) - return NicConfigurator(cfg.nics, use_system_devices=False) - finally: - if fp: - os.unlink(fp.name) - - def test_non_primary_nic_without_gateway(self): - """A non primary nic set is not required to have a gateway.""" - config = textwrap.dedent("""\ - [NETWORK] - NETWORKING = yes - BOOTPROTO = dhcp - HOSTNAME = myhost1 - DOMAINNAME = eng.vmware.com - - [NIC-CONFIG] - NICS = NIC1 - - [NIC1] - MACADDR = 00:50:56:a6:8c:08 - ONBOOT = yes - IPv4_MODE = BACKWARDS_COMPATIBLE - BOOTPROTO = static - IPADDR = 10.20.87.154 - NETMASK = 255.255.252.0 - """) - nc = self._get_NicConfigurator(config) - self.assertEqual( - [{'type': 'physical', 'name': 'NIC1', - 'mac_address': '00:50:56:a6:8c:08', - 'subnets': [ - {'control': 'auto', 'type': 'static', - 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}], - nc.generate()) - - def test_non_primary_nic_with_gateway(self): - """A non primary nic set can have a gateway.""" - config = textwrap.dedent("""\ - [NETWORK] - NETWORKING = yes - BOOTPROTO = dhcp - HOSTNAME = myhost1 - DOMAINNAME = eng.vmware.com - - [NIC-CONFIG] - NICS = NIC1 - - [NIC1] - MACADDR = 00:50:56:a6:8c:08 - ONBOOT = yes - IPv4_MODE = BACKWARDS_COMPATIBLE - BOOTPROTO = static - IPADDR = 10.20.87.154 - NETMASK = 255.255.252.0 - GATEWAY = 10.20.87.253 - """) - nc = self._get_NicConfigurator(config) - self.assertEqual( - [{'type': 'physical', 'name': 'NIC1', - 'mac_address': 
'00:50:56:a6:8c:08', - 'subnets': [ - {'control': 'auto', 'type': 'static', - 'address': '10.20.87.154', 'netmask': '255.255.252.0', - 'routes': - [{'type': 'route', 'destination': '10.20.84.0/22', - 'gateway': '10.20.87.253', 'metric': 10000}]}]}], - nc.generate()) - - def test_cust_non_primary_nic_with_gateway_(self): - """A customer non primary nic set can have a gateway.""" - config = textwrap.dedent("""\ - [NETWORK] - NETWORKING = yes - BOOTPROTO = dhcp - HOSTNAME = static-debug-vm - DOMAINNAME = cluster.local - - [NIC-CONFIG] - NICS = NIC1 - - [NIC1] - MACADDR = 00:50:56:ac:d1:8a - ONBOOT = yes - IPv4_MODE = BACKWARDS_COMPATIBLE - BOOTPROTO = static - IPADDR = 100.115.223.75 - NETMASK = 255.255.255.0 - GATEWAY = 100.115.223.254 - - - [DNS] - DNSFROMDHCP=no - - NAMESERVER|1 = 8.8.8.8 - - [DATETIME] - UTC = yes - """) - nc = self._get_NicConfigurator(config) - self.assertEqual( - [{'type': 'physical', 'name': 'NIC1', - 'mac_address': '00:50:56:ac:d1:8a', - 'subnets': [ - {'control': 'auto', 'type': 'static', - 'address': '100.115.223.75', 'netmask': '255.255.255.0', - 'routes': - [{'type': 'route', 'destination': '100.115.223.0/24', - 'gateway': '100.115.223.254', 'metric': 10000}]}]}], - nc.generate()) - - def test_a_primary_nic_with_gateway(self): - """A primary nic set can have a gateway.""" - config = textwrap.dedent("""\ - [NETWORK] - NETWORKING = yes - BOOTPROTO = dhcp - HOSTNAME = myhost1 - DOMAINNAME = eng.vmware.com - - [NIC-CONFIG] - NICS = NIC1 - - [NIC1] - MACADDR = 00:50:56:a6:8c:08 - ONBOOT = yes - IPv4_MODE = BACKWARDS_COMPATIBLE - BOOTPROTO = static - IPADDR = 10.20.87.154 - NETMASK = 255.255.252.0 - PRIMARY = true - GATEWAY = 10.20.87.253 - """) - nc = self._get_NicConfigurator(config) - self.assertEqual( - [{'type': 'physical', 'name': 'NIC1', - 'mac_address': '00:50:56:a6:8c:08', - 'subnets': [ - {'control': 'auto', 'type': 'static', - 'address': '10.20.87.154', 'netmask': '255.255.252.0', - 'gateway': '10.20.87.253'}]}], - nc.generate()) - - def test_meta_data(self): - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - conf = Config(cf) - self.assertIsNone(conf.meta_data_name) - cf._insertKey("CLOUDINIT|METADATA", "test-metadata") - conf = Config(cf) - self.assertEqual("test-metadata", conf.meta_data_name) - - def test_user_data(self): - cf = ConfigFile("tests/data/vmware/cust-dhcp-2nic.cfg") - conf = Config(cf) - self.assertIsNone(conf.user_data_name) - cf._insertKey("CLOUDINIT|USERDATA", "test-userdata") - conf = Config(cf) - self.assertEqual("test-userdata", conf.user_data_name) - - -# vi: ts=4 expandtab diff --git a/tests/unittests/util.py b/tests/unittests/util.py index 383f5f5c..2204c28f 100644 --- a/tests/unittests/util.py +++ b/tests/unittests/util.py @@ -15,7 +15,7 @@ def get_cloud(distro=None, paths=None, sys_cfg=None, metadata=None): """ paths = paths or helpers.Paths({}) sys_cfg = sys_cfg or {} - cls = distros.fetch(distro) if distro else TestingDistro + cls = distros.fetch(distro) if distro else MockDistro mydist = cls(distro, sys_cfg, paths) myds = DataSourceTesting(sys_cfg, mydist, paths) if metadata: @@ -49,14 +49,14 @@ class DataSourceTesting(DataSourceNone): return 'testing' -class TestingDistro(distros.Distro): - # TestingDistro is here to test base Distro class implementations +class MockDistro(distros.Distro): + # MockDistro is here to test base Distro class implementations def __init__(self, name="testingdistro", cfg=None, paths=None): if not cfg: cfg = {} if not paths: paths = {} - super(TestingDistro, self).__init__(name, cfg, paths) + 
super(MockDistro, self).__init__(name, cfg, paths)

 def install_packages(self, pkglist):
 pass
diff --git a/tox.ini b/tox.ini
index 874d3f20..ff888266 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,7 +3,7 @@ envlist = py3, xenial-dev, flake8, pylint
 recreate = True

 [testenv]
-commands = {envpython} -m pytest {posargs:tests/unittests cloudinit}
+commands = {envpython} -m pytest {posargs:tests/unittests}
 setenv =
 LC_ALL = en_US.utf-8
 passenv=
@@ -37,7 +37,7 @@ deps =
 commands = {envpython} -m pytest \
 --durations 10 \
 {posargs:--cov=cloudinit --cov-branch \
- tests/unittests cloudinit}
+ tests/unittests}

 [testenv:py27]
 basepython = python2.7
@@ -86,7 +86,7 @@ deps =
 # [testenv:xenial-dev]. See the comment there for details.
 commands =
 python ./tools/pipremove jsonschema
- python -m pytest {posargs:tests/unittests cloudinit}
+ python -m pytest {posargs:tests/unittests}
 basepython = python3
 deps =
 # Refer to the comment in [xenial-shared-deps] for details
@@ -104,7 +104,7 @@ deps =
 # changes here are reflected in [testenv:xenial].
 commands =
 python ./tools/pipremove jsonschema
- python -m pytest {posargs:tests/unittests cloudinit}
+ python -m pytest {posargs:tests/unittests}
 basepython = {[testenv:xenial]basepython}
 deps =
 # Refer to the comment in [xenial-shared-deps] for details
@@ -163,7 +163,7 @@ setenv =
 [pytest]
 # TODO: s/--strict/--strict-markers/ once xenial support is dropped
-testpaths = cloudinit tests/unittests
+testpaths = tests/unittests
 addopts = --strict
 log_format = %(asctime)s %(levelname)-9s %(name)s:%(filename)s:%(lineno)d %(message)s
 log_date_format = %Y-%m-%d %H:%M:%S
-- cgit v1.2.3

From 0e25076b34fa995161b83996e866c0974cee431f Mon Sep 17 00:00:00 2001
From: Emanuele Giuseppe Esposito
Date: Mon, 6 Dec 2021 18:34:26 +0100
Subject: cloudinit/net: handle two different routes for the same ip (#1124)

If a DHCP server is configured to hand out a lease like this:

$ cat /var/tmp/cloud-init/cloud-init-dhcp-f0rie5tm/dhcp.leases
lease {
...
option classless-static-routes 31.169.254.169.254 0.0.0.0,31.169.254.169.254 10.112.143.127,22.10.112.140 0.0.0.0,0 10.112.140.1;
...
}

cloud-init fails to configure the routes via 'ip route add', because there
are two different routes for 169.254.169.254:

$ ip -4 route add 192.168.1.1/32 via 0.0.0.0 dev eth0
$ ip -4 route add 192.168.1.1/32 via 10.112.140.248 dev eth0

But NetworkManager can handle such a scenario successfully, as it uses
"ip route append".
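For illustration (a hypothetical sketch, not part of the original patch): with
'add', the kernel refuses a second route to the same destination, so the second
command above fails, while 'append' installs both routes. The sketch mirrors
the subp call shape used by EphemeralIPv4Network in the diff that follows; the
addresses and device name are placeholder values:

    # Hypothetical sketch: install two routes for one destination.
    # 'ip route add' would fail on the second call; 'append' accepts both.
    from cloudinit import subp

    net_address, dev = '169.254.169.254/32', 'eth0'
    for gateway in ('0.0.0.0', '10.112.140.248'):
        # As in the real code, omit 'via' for the link-scope 0.0.0.0 gateway.
        via_arg = ['via', gateway] if gateway != '0.0.0.0' else []
        subp.subp(['ip', '-4', 'route', 'append', net_address]
                  + via_arg + ['dev', dev], capture=True)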
So change cloud-init to also use "ip route append" to fix the issue: $ ip -4 route append 192.168.1.1/32 via 0.0.0.0 dev eth0 $ ip -4 route append 192.168.1.1/32 via 10.112.140.248 dev eth0 Signed-off-by: Emanuele Giuseppe Esposito RHBZ: #2003231 --- cloudinit/net/__init__.py | 2 +- tests/unittests/net/test_init.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 7558745f..f81f3a7b 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -1157,7 +1157,7 @@ class EphemeralIPv4Network(object): if gateway != "0.0.0.0": via_arg = ['via', gateway] subp.subp( - ['ip', '-4', 'route', 'add', net_address] + via_arg + + ['ip', '-4', 'route', 'append', net_address] + via_arg + ['dev', self.interface], capture=True) self.cleanup_cmds.insert( 0, ['ip', '-4', 'route', 'del', net_address] + via_arg + diff --git a/tests/unittests/net/test_init.py b/tests/unittests/net/test_init.py index 666e8425..82854ab3 100644 --- a/tests/unittests/net/test_init.py +++ b/tests/unittests/net/test_init.py @@ -723,13 +723,13 @@ class TestEphemeralIPV4Network(CiTestCase): ['ip', '-family', 'inet', 'link', 'set', 'dev', 'eth0', 'up'], capture=True), mock.call( - ['ip', '-4', 'route', 'add', '192.168.2.1/32', + ['ip', '-4', 'route', 'append', '192.168.2.1/32', 'dev', 'eth0'], capture=True), mock.call( - ['ip', '-4', 'route', 'add', '169.254.169.254/32', + ['ip', '-4', 'route', 'append', '169.254.169.254/32', 'via', '192.168.2.1', 'dev', 'eth0'], capture=True), mock.call( - ['ip', '-4', 'route', 'add', '0.0.0.0/0', + ['ip', '-4', 'route', 'append', '0.0.0.0/0', 'via', '192.168.2.1', 'dev', 'eth0'], capture=True)] expected_teardown_calls = [ mock.call( -- cgit v1.2.3 From bedac77e9348e7a54c0ec364fb61df90cd893972 Mon Sep 17 00:00:00 2001 From: Brett Holman Date: Mon, 6 Dec 2021 15:27:12 -0700 Subject: Add Strict Metaschema Validation (#1101) Improve schema validation. This adds strict validation of config module definitions at testing time, with plumbing included for future runtime validation. This eliminates a class of bugs resulting from schemas that have definitions that are incorrect, but get interpreted by jsonschema as "additionalProperties" that are therefore ignored. 
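For illustration, a minimal (hypothetical) reproduction of that bug class with
python-jsonschema:

    from jsonschema import Draft4Validator

    # "propertys" is a typo for "properties". The stock Draft4 metaschema
    # permits unknown keywords, so the schema is accepted as-is and the
    # intended string constraint is silently never enforced.
    bad_schema = {'type': 'object', 'propertys': {'x': {'type': 'string'}}}
    Draft4Validator.check_schema(bad_schema)        # no error raised
    Draft4Validator(bad_schema).validate({'x': 5})  # also passes

A metaschema with additionalProperties set to False makes check_schema()
reject bad_schema instead of accepting it, which is what this change wires
into the unit tests: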
- Add strict meta-schema for jsonschema unit test validation - Separate schema from module metadata structure - Improve type annotations for various functions and data types Cleanup: - Remove unused jsonschema "required" elements - Eliminate manual memoization in schema.py:get_schema(), reference module.__doc__ directly --- cloudinit/cmd/clean.py | 8 +- cloudinit/cmd/cloud_id.py | 6 +- cloudinit/config/cc_apk_configure.py | 11 +- cloudinit/config/cc_apt_configure.py | 11 +- cloudinit/config/cc_bootcmd.py | 11 +- cloudinit/config/cc_chef.py | 11 +- cloudinit/config/cc_install_hotplug.py | 9 +- cloudinit/config/cc_locale.py | 9 +- cloudinit/config/cc_ntp.py | 11 +- cloudinit/config/cc_resizefs.py | 10 +- cloudinit/config/cc_runcmd.py | 11 +- cloudinit/config/cc_snap.py | 11 +- cloudinit/config/cc_ubuntu_advantage.py | 10 +- cloudinit/config/cc_ubuntu_drivers.py | 10 +- cloudinit/config/cc_write_files.py | 10 +- cloudinit/config/cc_write_files_deferred.py | 41 ++-- cloudinit/config/cc_zypper_add_repo.py | 10 +- cloudinit/config/schema.py | 288 ++++++++++++++++------- cloudinit/importer.py | 24 +- cloudinit/util.py | 17 +- doc/rtd/conf.py | 13 +- tests/unittests/cmd/test_clean.py | 2 +- tests/unittests/cmd/test_cloud_id.py | 4 +- tests/unittests/config/test_schema.py | 339 ++++++++++++++++++++-------- tests/unittests/test_cli.py | 105 ++++++++- 25 files changed, 701 insertions(+), 291 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 928a8eea..3502dd56 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -11,11 +11,9 @@ import sys from cloudinit.stages import Init from cloudinit.subp import (ProcessExecutionError, subp) -from cloudinit.util import (del_dir, del_file, get_config_logfiles, is_link) - - -def error(msg): - sys.stderr.write("ERROR: " + msg + "\n") +from cloudinit.util import ( + del_dir, del_file, get_config_logfiles, is_link, error +) def get_parser(parser=None): diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py index 97608921..0cdc9675 100755 --- a/cloudinit/cmd/cloud_id.py +++ b/cloudinit/cmd/cloud_id.py @@ -6,6 +6,7 @@ import argparse import json import sys +from cloudinit.util import error from cloudinit.sources import ( INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) @@ -40,11 +41,6 @@ def get_parser(parser=None): return parser -def error(msg): - sys.stderr.write('ERROR: %s\n' % msg) - return 1 - - def handle_args(name, args): """Handle calls to 'cloud-id' cli. 
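Both command modules above now import a shared error() helper from
cloudinit.util; its definition lands in the cloudinit/util.py hunk, which this
excerpt does not show. A sketch of its assumed shape, inferred from the removed
call sites above and from the partial(error, sys_exit=True) wrapper added to
schema.py further below:

    import sys

    # Assumed shape only; the authoritative version is in cloudinit/util.py.
    def error(msg, rc=1, sys_exit=False):
        """Write msg to stderr; exit with rc when sys_exit is True."""
        sys.stderr.write('ERROR: %s\n' % msg)
        if sys_exit:
            sys.exit(rc)
        return rc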
diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py index 84d7a0b6..d227a58d 100644 --- a/cloudinit/config/cc_apk_configure.py +++ b/cloudinit/config/cc_apk_configure.py @@ -12,8 +12,7 @@ from cloudinit import log as logging from cloudinit import temp_utils from cloudinit import templater from cloudinit import util -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -56,7 +55,7 @@ REPOSITORIES_TEMPLATE = """\ frequency = PER_INSTANCE distros = ['alpine'] -schema = { +meta = { 'id': 'cc_apk_configure', 'name': 'APK Configure', 'title': 'Configure apk repositories file', @@ -95,6 +94,9 @@ schema = { """), ], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'apk_repos': { @@ -171,14 +173,13 @@ schema = { """) } }, - 'required': [], 'minProperties': 1, # Either preserve_repositories or alpine_repo 'additionalProperties': False, } } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) def handle(name, cfg, cloud, log, _args): diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 86d0feae..2e844c2c 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -14,8 +14,7 @@ import re import pathlib from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import gpg from cloudinit import log as logging from cloudinit import subp @@ -75,7 +74,8 @@ mirror_property = { } } } -schema = { + +meta = { 'id': 'cc_apt_configure', 'name': 'Apt Configure', 'title': 'Configure apt for the user', @@ -155,6 +155,9 @@ schema = { ------END PGP PUBLIC KEY BLOCK-------""")], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'apt': { @@ -398,7 +401,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) # place where apt stores cached repository data diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 246e4497..06f7a26e 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -12,8 +12,7 @@ import os from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS from cloudinit import temp_utils from cloudinit import subp @@ -29,7 +28,7 @@ frequency = PER_ALWAYS distros = ['all'] -schema = { +meta = { 'id': 'cc_bootcmd', 'name': 'Bootcmd', 'title': 'Run arbitrary commands early in the boot process', @@ -57,6 +56,9 @@ schema = { - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] """)], 'frequency': PER_ALWAYS, +} + +schema = { 'type': 'object', 'properties': { 'bootcmd': { @@ -69,12 +71,11 @@ schema = { 'additionalItems': False, # Reject items of non-string non-list 'additionalProperties': False, 'minItems': 1, - 'required': [], } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 7b20222e..ed734d1c 100644 --- a/cloudinit/config/cc_chef.py +++ 
b/cloudinit/config/cc_chef.py @@ -14,8 +14,7 @@ import os from textwrap import dedent from cloudinit import subp -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import templater from cloudinit import temp_utils from cloudinit import url_helper @@ -89,7 +88,8 @@ CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) frequency = PER_ALWAYS distros = ["all"] -schema = { + +meta = { 'id': 'cc_chef', 'name': 'Chef', 'title': 'module that configures, starts and installs chef', @@ -126,6 +126,9 @@ schema = { ssl_verify_mode: :verify_peer validation_name: yourorg-validator""")], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'chef': { @@ -357,7 +360,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) def post_run_chef(chef_cfg, log): diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py index da98c409..9b4075cc 100644 --- a/cloudinit/config/cc_install_hotplug.py +++ b/cloudinit/config/cc_install_hotplug.py @@ -6,7 +6,7 @@ from textwrap import dedent from cloudinit import util from cloudinit import subp from cloudinit import stages -from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS from cloudinit.event import EventType, EventScope from cloudinit.settings import PER_INSTANCE @@ -15,7 +15,7 @@ from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE distros = [ALL_DISTROS] -schema = { +meta = { "id": "cc_install_hotplug", "name": "Install Hotplug", "title": "Install hotplug if supported and enabled", @@ -49,6 +49,9 @@ schema = { """), ], "frequency": frequency, +} + +schema = { "type": "object", "properties": { "updates": { @@ -81,7 +84,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) +__doc__ = get_meta_doc(meta, schema) HOTPLUG_UDEV_PATH = "/etc/udev/rules.d/10-cloud-init-hook-hotplug.rules" diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index 4f8b7bf6..7fed9abd 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -11,13 +11,13 @@ from textwrap import dedent from cloudinit import util -from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE distros = ['all'] -schema = { +meta = { 'id': 'cc_locale', 'name': 'Locale', 'title': 'Set system locale', @@ -39,6 +39,9 @@ schema = { """), ], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'locale': { @@ -57,7 +60,7 @@ schema = { }, } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, args): diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index c3aee798..9c085a04 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -16,7 +16,7 @@ from cloudinit import templater from cloudinit import type_utils from cloudinit import subp from cloudinit import util -from cloudinit.config.schema import get_schema_doc, validate_cloudconfig_schema +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = 
logging.getLogger(__name__) @@ -140,7 +140,7 @@ DISTRO_CLIENT_CONFIG = { # configuration options before actually attempting to deploy with said # configuration. -schema = { +meta = { 'id': 'cc_ntp', 'name': 'NTP', 'title': 'enable and configure ntp', @@ -190,6 +190,9 @@ schema = { - ntp.ubuntu.com - 192.168.23.2""")], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'ntp': { @@ -289,12 +292,10 @@ schema = { }, # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override # of builtin client values. - 'required': [], 'minProperties': 1, # If we have config, define something 'additionalProperties': False }, }, - 'required': [], 'additionalProperties': False } } @@ -303,7 +304,7 @@ REQUIRED_NTP_CONFIG_KEYS = frozenset([ 'check_exe', 'confpath', 'packages', 'service_name']) -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def distro_ntp_client_configs(distro): diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 990a6939..00bb7ae7 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -13,8 +13,7 @@ import os import stat from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS from cloudinit import subp from cloudinit import util @@ -24,7 +23,7 @@ NOBLOCK = "noblock" frequency = PER_ALWAYS distros = ['all'] -schema = { +meta = { 'id': 'cc_resizefs', 'name': 'Resizefs', 'title': 'Resize filesystem', @@ -42,6 +41,9 @@ schema = { 'examples': [ 'resize_rootfs: false # disable root filesystem resize operation'], 'frequency': PER_ALWAYS, +} + +schema = { 'type': 'object', 'properties': { 'resize_rootfs': { @@ -52,7 +54,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def _resize_btrfs(mount_point, devpth): diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 15960c7d..2f5e02cb 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -8,8 +8,7 @@ """Runcmd: run arbitrary commands at rc.local with output to the console""" -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS from cloudinit.settings import PER_INSTANCE from cloudinit import util @@ -26,7 +25,7 @@ from textwrap import dedent distros = [ALL_DISTROS] -schema = { +meta = { 'id': 'cc_runcmd', 'name': 'Runcmd', 'title': 'Run arbitrary commands', @@ -58,6 +57,9 @@ schema = { - [ wget, "http://example.org", -O, /tmp/index.html ] """)], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'runcmd': { @@ -71,12 +73,11 @@ schema = { 'additionalItems': False, # Reject items of non-string non-list 'additionalProperties': False, 'minItems': 1, - 'required': [], } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index 20ed7d2f..21f30b57 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -8,8 +8,7 @@ import sys from textwrap import dedent from cloudinit import log as logging 
-from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE from cloudinit.subp import prepend_base_command from cloudinit import subp @@ -21,7 +20,7 @@ frequency = PER_INSTANCE LOG = logging.getLogger(__name__) -schema = { +meta = { 'id': 'cc_snap', 'name': 'Snap', 'title': 'Install, configure and manage snapd and snap packages', @@ -103,6 +102,9 @@ schema = { signed_assertion_blob_here """)], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'snap': { @@ -139,13 +141,12 @@ schema = { } }, 'additionalProperties': False, # Reject keys not in schema - 'required': [], 'minProperties': 1 } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() SNAP_CMD = "snap" ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions" diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index d61dc655..831a92a2 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -4,8 +4,7 @@ from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import subp @@ -16,7 +15,7 @@ UA_URL = 'https://ubuntu.com/advantage' distros = ['ubuntu'] -schema = { +meta = { 'id': 'cc_ubuntu_advantage', 'name': 'Ubuntu Advantage', 'title': 'Configure Ubuntu Advantage support services', @@ -61,6 +60,9 @@ schema = { - fips """)], 'frequency': PER_INSTANCE, +} + +schema = { 'type': 'object', 'properties': { 'ubuntu_advantage': { @@ -82,7 +84,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/cc_ubuntu_drivers.py b/cloudinit/config/cc_ubuntu_drivers.py index 2d1d2b32..7f617efe 100644 --- a/cloudinit/config/cc_ubuntu_drivers.py +++ b/cloudinit/config/cc_ubuntu_drivers.py @@ -5,8 +5,7 @@ import os from textwrap import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import subp @@ -18,7 +17,7 @@ LOG = logging.getLogger(__name__) frequency = PER_INSTANCE distros = ['ubuntu'] -schema = { +meta = { 'id': 'cc_ubuntu_drivers', 'name': 'Ubuntu Drivers', 'title': 'Interact with third party drivers in Ubuntu.', @@ -32,6 +31,9 @@ schema = { license-accepted: true """)], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'drivers': { @@ -64,7 +66,7 @@ schema = { OLD_UBUNTU_DRIVERS_STDERR_NEEDLE = ( "ubuntu-drivers: error: argument : invalid choice: 'install'") -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() # Use a debconf template to configure a global debconf variable diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 41c75fa2..55f8c684 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -10,8 +10,7 @@ import base64 import os from textwrap 
import dedent -from cloudinit.config.schema import ( - get_schema_doc, validate_cloudconfig_schema) +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE from cloudinit import util @@ -38,7 +37,7 @@ supported_encoding_types = [ 'gz', 'gzip', 'gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64', 'b64', 'base64'] -schema = { +meta = { 'id': 'cc_write_files', 'name': 'Write Files', 'title': 'write arbitrary files', @@ -111,6 +110,9 @@ schema = { defer: true """)], 'frequency': frequency, +} + +schema = { 'type': 'object', 'properties': { 'write_files': { @@ -187,7 +189,7 @@ schema = { } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, _cloud, log, _args): diff --git a/cloudinit/config/cc_write_files_deferred.py b/cloudinit/config/cc_write_files_deferred.py index 0c75aa22..4fc8659c 100644 --- a/cloudinit/config/cc_write_files_deferred.py +++ b/cloudinit/config/cc_write_files_deferred.py @@ -4,34 +4,31 @@ """Defer writing certain files""" -from textwrap import dedent - from cloudinit.config.schema import validate_cloudconfig_schema from cloudinit import util from cloudinit.config.cc_write_files import ( schema as write_files_schema, write_files, DEFAULT_DEFER) +# meta is not used in this module, but it remains as code documentation +# +# id: cc_write_files_deferred' +# name: 'Write Deferred Files +# distros: ['all'], +# frequency: PER_INSTANCE, +# title: +# write certain files, whose creation as been deferred, during +# final stage +# description: +# This module is based on `'Write Files' `__, and +# will handle all files from the write_files list, that have been +# marked as deferred and thus are not being processed by the +# write-files module. +# +# *Please note that his module is not exposed to the user through +# its own dedicated top-level directive.* + +schema = write_files_schema -schema = util.mergemanydict([ - { - 'id': 'cc_write_files_deferred', - 'name': 'Write Deferred Files', - 'title': dedent("""\ - write certain files, whose creation as been deferred, during - final stage - """), - 'description': dedent("""\ - This module is based on `'Write Files' `__, and - will handle all files from the write_files list, that have been - marked as deferred and thus are not being processed by the - write-files module. 
- - *Please note that his module is not exposed to the user through - its own dedicated top-level directive.* - """) - }, - write_files_schema -]) # Not exposed, because related modules should document this behaviour __doc__ = None diff --git a/cloudinit/config/cc_zypper_add_repo.py b/cloudinit/config/cc_zypper_add_repo.py index 05855b0c..bf1638fb 100644 --- a/cloudinit/config/cc_zypper_add_repo.py +++ b/cloudinit/config/cc_zypper_add_repo.py @@ -9,14 +9,14 @@ import configobj import os from textwrap import dedent -from cloudinit.config.schema import get_schema_doc +from cloudinit.config.schema import get_meta_doc from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import util distros = ['opensuse', 'sles'] -schema = { +meta = { 'id': 'cc_zypper_add_repo', 'name': 'ZypperAddRepo', 'title': 'Configure zypper behavior and add zypper repositories', @@ -51,6 +51,9 @@ schema = { # any setting in /etc/zypp/zypp.conf """)], 'frequency': PER_ALWAYS, +} + +schema = { 'type': 'object', 'properties': { 'zypper': { @@ -86,14 +89,13 @@ schema = { /etc/zypp/zypp.conf'""") } }, - 'required': [], 'minProperties': 1, # Either config or repo must be provided 'additionalProperties': False, # only repos and config allowed } } } -__doc__ = get_schema_doc(schema) # Supplement python help() +__doc__ = get_meta_doc(meta, schema) # Supplement python help() LOG = logging.getLogger(__name__) diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index 456bab2c..d32b7c01 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -3,19 +3,22 @@ from cloudinit.cmd.devel import read_cfg_paths from cloudinit import importer -from cloudinit.util import find_modules, load_file +from cloudinit.importer import MetaSchema +from cloudinit.util import find_modules, load_file, error import argparse from collections import defaultdict from copy import deepcopy +from functools import partial import logging import os import re import sys import yaml +error = partial(error, sys_exit=True) + _YAML_MAP = {True: 'true', False: 'false', None: 'null'} -SCHEMA_UNDEFINED = b'UNDEFINED' CLOUD_CONFIG_HEADER = b'#cloud-config' SCHEMA_DOC_TMPL = """ {name} @@ -34,7 +37,7 @@ SCHEMA_DOC_TMPL = """ {property_doc} {examples} """ -SCHEMA_PROPERTY_TMPL = '{prefix}**{prop_name}:** ({type}) {description}' +SCHEMA_PROPERTY_TMPL = "{prefix}**{prop_name}:** ({prop_type}) {description}" SCHEMA_LIST_ITEM_TMPL = ( '{prefix}Each item in **{prop_name}** list supports the following keys:') SCHEMA_EXAMPLES_HEADER = '\n**Examples**::\n\n' @@ -72,45 +75,102 @@ def is_schema_byte_string(checker, instance): isinstance(instance, (bytes,))) -def validate_cloudconfig_schema(config, schema, strict=False): - """Validate provided config meets the schema definition. +def get_jsonschema_validator(): + """Get metaschema validator and format checker - @param config: Dict of cloud configuration settings validated against - schema. - @param schema: jsonschema dict describing the supported schema definition - for the cloud config module (config.cc_*). - @param strict: Boolean, when True raise SchemaValidationErrors instead of - logging warnings. + Older versions of jsonschema require some compatibility changes. - @raises: SchemaValidationError when provided config does not validate - against the provided schema. 
+    @returns: Tuple: (jsonschema.Validator, FormatChecker)
+    @raises: ImportError when jsonschema is not present
     """
-    try:
-        from jsonschema import Draft4Validator, FormatChecker
-        from jsonschema.validators import create, extend
-    except ImportError:
-        logging.debug(
-            'Ignoring schema validation. python-jsonschema is not present')
-        return
+    from jsonschema import Draft4Validator, FormatChecker
+    from jsonschema.validators import create
 
     # Allow for bytes to be presented as an acceptable valid value for string
     # type jsonschema attributes in cloud-init's schema.
     # This allows #cloud-config to provide valid yaml "content: !!binary | ..."
+
+    strict_metaschema = deepcopy(Draft4Validator.META_SCHEMA)
+    strict_metaschema['additionalProperties'] = False
     if hasattr(Draft4Validator, 'TYPE_CHECKER'):
         # jsonschema 3.0+
         type_checker = Draft4Validator.TYPE_CHECKER.redefine(
             'string', is_schema_byte_string)
-        cloudinitValidator = extend(Draft4Validator, type_checker=type_checker)
+        cloudinitValidator = create(
+            meta_schema=strict_metaschema,
+            validators=Draft4Validator.VALIDATORS,
+            version="draft4",
+            type_checker=type_checker)
     else:
         # jsonschema 2.6 workaround
         types = Draft4Validator.DEFAULT_TYPES
-        # Allow bytes as well as string (and disable a spurious
-        # unsupported-assignment-operation pylint warning which appears because
-        # this code path isn't written against the latest jsonschema).
+        # Allow bytes as well as string (and disable a spurious unsupported
+        # assignment-operation pylint warning which appears because this
+        # code path isn't written against the latest jsonschema).
         types['string'] = (str, bytes)  # pylint: disable=E1137
         cloudinitValidator = create(
-            meta_schema=Draft4Validator.META_SCHEMA,
+            meta_schema=strict_metaschema,
             validators=Draft4Validator.VALIDATORS,
             version="draft4",
             default_types=types)
+    return (cloudinitValidator, FormatChecker)
+
+
+def validate_cloudconfig_metaschema(validator, schema: dict, throw=True):
+    """Validate that the provided schema meets the metaschema definition.
+
+    @param validator: Draft4Validator instance used to validate the schema
+    @param schema: schema to validate
+    @param throw: Boolean, when True raise a SchemaValidationError if the
+        schema is invalid; when False, only log a warning (callers may still
+        need the validator and checker even when the schema is invalid).
+
+    @raises: ImportError when jsonschema is not present
+    @raises: SchemaValidationError when the schema is invalid
+    """
+
+    from jsonschema.exceptions import SchemaError
+
+    try:
+        validator.check_schema(schema)
+    except SchemaError as err:
+        # Raise SchemaValidationError to avoid jsonschema imports at call
+        # sites
+        if throw:
+            raise SchemaValidationError(
+                schema_errors=(
+                    ('.'.join([str(p) for p in err.path]), err.message),
+                )
+            ) from err
+        logging.warning(
+            "Meta-schema validation failed, attempting to validate config "
+            "anyway: %s", err)
+
+
+def validate_cloudconfig_schema(
+    config: dict, schema: dict, strict=False, strict_metaschema=False
+):
+    """Validate provided config meets the schema definition.
+
+    @param config: Dict of cloud configuration settings validated against
+        schema.
+    @param schema: jsonschema dict describing the supported schema definition
+        for the cloud config module (config.cc_*).
+    @param strict: Boolean, when True raise SchemaValidationErrors instead of
+        logging warnings.
+ @param strict_metaschema: Boolean, when True validates schema using strict + metaschema definition at runtime (currently unused) + + @raises: SchemaValidationError when provided config does not validate + against the provided schema. + """ + try: + (cloudinitValidator, FormatChecker) = get_jsonschema_validator() + if strict_metaschema: + validate_cloudconfig_metaschema( + cloudinitValidator, schema, throw=False) + except ImportError: + logging.debug("Ignoring schema validation. jsonschema is not present") + return + validator = cloudinitValidator(schema, format_checker=FormatChecker()) errors = () for error in sorted(validator.iter_errors(config), key=lambda e: e.path): @@ -301,12 +361,15 @@ def _schemapath_for_cloudconfig(config, original_content): return schema_line_numbers -def _get_property_type(property_dict): - """Return a string representing a property type from a given jsonschema.""" - property_type = property_dict.get('type', SCHEMA_UNDEFINED) - if property_type == SCHEMA_UNDEFINED and property_dict.get('enum'): +def _get_property_type(property_dict: dict) -> str: + """Return a string representing a property type from a given + jsonschema. + """ + property_type = property_dict.get("type") + if property_type is None and property_dict.get("enum"): property_type = [ - str(_YAML_MAP.get(k, k)) for k in property_dict['enum']] + str(_YAML_MAP.get(k, k)) for k in property_dict["enum"] + ] if isinstance(property_type, list): property_type = '/'.join(property_type) items = property_dict.get('items', {}) @@ -317,12 +380,12 @@ def _get_property_type(property_dict): sub_property_type += '/' sub_property_type += '(' + _get_property_type(sub_item) + ')' if sub_property_type: - return '{0} of {1}'.format(property_type, sub_property_type) - return property_type + return "{0} of {1}".format(property_type, sub_property_type) + return property_type or "UNDEFINED" -def _parse_description(description, prefix): - """Parse description from the schema in a format that we can better +def _parse_description(description, prefix) -> str: + """Parse description from the meta in a format that we can better display in our docs. This parser does three things: - Guarantee that a paragraph will be in a single line @@ -330,7 +393,7 @@ def _parse_description(description, prefix): the first paragraph - Proper align lists of items - @param description: The original description in the schema. + @param description: The original description in the meta. 
@param prefix: The number of spaces used to align the current description """ list_paragraph = prefix * 3 @@ -343,20 +406,24 @@ def _parse_description(description, prefix): return description -def _get_property_doc(schema, prefix=' '): +def _get_property_doc(schema: dict, prefix=" ") -> str: """Return restructured text describing the supported schema properties.""" new_prefix = prefix + ' ' properties = [] for prop_key, prop_config in schema.get('properties', {}).items(): - # Define prop_name and dscription for SCHEMA_PROPERTY_TMPL + # Define prop_name and description for SCHEMA_PROPERTY_TMPL description = prop_config.get('description', '') - properties.append(SCHEMA_PROPERTY_TMPL.format( - prefix=prefix, - prop_name=prop_key, - type=_get_property_type(prop_config), - description=_parse_description(description, prefix))) - items = prop_config.get('items') + # Define prop_name and description for SCHEMA_PROPERTY_TMPL + properties.append( + SCHEMA_PROPERTY_TMPL.format( + prefix=prefix, + prop_name=prop_key, + description=_parse_description(description, prefix), + prop_type=_get_property_type(prop_config), + ) + ) + items = prop_config.get("items") if items: if isinstance(items, list): for item in items: @@ -373,9 +440,9 @@ def _get_property_doc(schema, prefix=' '): return '\n\n'.join(properties) -def _get_schema_examples(schema, prefix=''): - """Return restructured text describing the schema examples if present.""" - examples = schema.get('examples') +def _get_examples(meta: MetaSchema) -> str: + """Return restructured text describing the meta examples if present.""" + examples = meta.get("examples") if not examples: return '' rst_content = SCHEMA_EXAMPLES_HEADER @@ -390,48 +457,111 @@ def _get_schema_examples(schema, prefix=''): return rst_content -def get_schema_doc(schema): - """Return reStructured text rendering the provided jsonschema. +def get_meta_doc(meta: MetaSchema, schema: dict) -> str: + """Return reStructured text rendering the provided metadata. - @param schema: Dict of jsonschema to render. - @raise KeyError: If schema lacks an expected key. + @param meta: Dict of metadata to render. + @raise KeyError: If metadata lacks an expected key. 
""" - schema_copy = deepcopy(schema) - schema_copy['property_doc'] = _get_property_doc(schema) - schema_copy['examples'] = _get_schema_examples(schema) - schema_copy['distros'] = ', '.join(schema['distros']) + + if not meta or not schema: + raise ValueError("Expected meta and schema") + keys = set(meta.keys()) + expected = set( + { + "id", + "title", + "examples", + "frequency", + "distros", + "description", + "name", + } + ) + error_message = "" + if expected - keys: + error_message = "Missing expected keys in module meta: {}".format( + expected - keys + ) + elif keys - expected: + error_message = ( + "Additional unexpected keys found in module meta: {}".format( + keys - expected + ) + ) + if error_message: + raise KeyError(error_message) + + # cast away type annotation + meta_copy = dict(deepcopy(meta)) + meta_copy["property_doc"] = _get_property_doc(schema) + meta_copy["examples"] = _get_examples(meta) + meta_copy["distros"] = ", ".join(meta["distros"]) # Need an underbar of the same length as the name - schema_copy['title_underbar'] = re.sub(r'.', '-', schema['name']) - return SCHEMA_DOC_TMPL.format(**schema_copy) + meta_copy["title_underbar"] = re.sub(r".", "-", meta["name"]) + template = SCHEMA_DOC_TMPL.format(**meta_copy) + return template + + +def get_modules() -> dict: + configs_dir = os.path.dirname(os.path.abspath(__file__)) + return find_modules(configs_dir) + +def load_doc(requested_modules: list) -> str: + """Load module docstrings -FULL_SCHEMA = None + Docstrings are generated on module load. Reduce, reuse, recycle. + """ + docs = "" + all_modules = list(get_modules().values()) + ["all"] + invalid_docs = set(requested_modules).difference(set(all_modules)) + if invalid_docs: + error( + "Invalid --docs value {}. Must be one of: {}".format( + list(invalid_docs), ", ".join(all_modules), + ) + ) + for mod_name in all_modules: + if "all" in requested_modules or mod_name in requested_modules: + (mod_locs, _) = importer.find_module( + mod_name, ["cloudinit.config"], ["schema"] + ) + if mod_locs: + mod = importer.import_module(mod_locs[0]) + docs += mod.__doc__ or "" + return docs -def get_schema(): +def get_schema() -> dict: """Return jsonschema coalesced from all cc_* cloud-config module.""" - global FULL_SCHEMA - if FULL_SCHEMA: - return FULL_SCHEMA full_schema = { - '$schema': 'http://json-schema.org/draft-04/schema#', - 'id': 'cloud-config-schema', 'allOf': []} - - configs_dir = os.path.dirname(os.path.abspath(__file__)) - potential_handlers = find_modules(configs_dir) - for (_fname, mod_name) in potential_handlers.items(): - mod_locs, _looked_locs = importer.find_module( - mod_name, ['cloudinit.config'], ['schema']) + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "cloud-config-schema", + "allOf": [], + } + + for (_, mod_name) in get_modules().items(): + (mod_locs, _) = importer.find_module( + mod_name, ["cloudinit.config"], ["schema"] + ) if mod_locs: mod = importer.import_module(mod_locs[0]) - full_schema['allOf'].append(mod.schema) - FULL_SCHEMA = full_schema + full_schema["allOf"].append(mod.schema) return full_schema -def error(message): - print(message, file=sys.stderr) - sys.exit(1) +def get_meta() -> dict: + """Return metadata coalesced from all cc_* cloud-config module.""" + full_meta = dict() + for (_, mod_name) in get_modules().items(): + mod_locs, _ = importer.find_module( + mod_name, ["cloudinit.config"], ["meta"] + ) + if mod_locs: + mod = importer.import_module(mod_locs[0]) + full_meta[mod.meta["id"]] = mod.meta + return full_meta def 
get_parser(parser=None):
@@ -474,15 +604,7 @@ def handle_schema_args(name, args):
             cfg_name = args.config_file
         print("Valid cloud-config:", cfg_name)
     elif args.docs:
-        schema_ids = [subschema['id'] for subschema in full_schema['allOf']]
-        schema_ids += ['all']
-        invalid_docs = set(args.docs).difference(set(schema_ids))
-        if invalid_docs:
-            error('Invalid --docs value {0}. Must be one of: {1}'.format(
-                list(invalid_docs), ', '.join(schema_ids)))
-        for subschema in full_schema['allOf']:
-            if 'all' in args.docs or subschema['id'] in args.docs:
-                print(get_schema_doc(subschema))
+        print(load_doc(args.docs))
 
 
 def main():
diff --git a/cloudinit/importer.py b/cloudinit/importer.py
index f1194fbe..4e677af3 100644
--- a/cloudinit/importer.py
+++ b/cloudinit/importer.py
@@ -9,6 +9,27 @@
 # This file is part of cloud-init. See LICENSE file for license information.
 
 import sys
+import typing
+
+# Annotations add value for development but must not break old versions.
+# pyver: 3.5 -> 3.8
+# pylint: disable=E1101
+if sys.version_info >= (3, 8) and hasattr(typing, "TypedDict"):
+    MetaSchema = typing.TypedDict(
+        "MetaSchema",
+        {
+            "name": str,
+            "id": str,
+            "title": str,
+            "description": str,
+            "distros": typing.List[str],
+            "examples": typing.List[str],
+            "frequency": str,
+        },
+    )
+else:
+    MetaSchema = dict
+# pylint: enable=E1101
 
 
 def import_module(module_name):
@@ -16,7 +37,8 @@ def import_module(module_name):
     return sys.modules[module_name]
 
 
-def find_module(base_name, search_paths, required_attrs=None):
+def find_module(base_name: str, search_paths, required_attrs=None) -> tuple:
+    """Finds and imports specified modules"""
    if not required_attrs:
         required_attrs = []
     # NOTE(harlowja): translate the search paths to include the base name.
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 2045a6ab..1b462a38 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -347,7 +347,7 @@ def extract_usergroup(ug_pair):
     return (u, g)
 
 
-def find_modules(root_dir):
+def find_modules(root_dir) -> dict:
     entries = dict()
     for fname in glob.glob(os.path.join(root_dir, "*.py")):
         if not os.path.isfile(fname):
@@ -2751,4 +2751,19 @@ def get_proc_ppid(pid):
             ppid = int(parts[3])
     return ppid
 
+
+def error(msg, rc=1, fmt='Error:\n{}', sys_exit=False):
+    """
+    Print an error to stderr and either return or exit.
+
+    @param msg: message to print
+    @param rc: return code (default: 1)
+    @param fmt: format string the message is wrapped in (default: 'Error:\n{}')
+    @param sys_exit: when True, exit with rc instead of returning
+        (default: False)
+    """
+    print(fmt.format(msg), file=sys.stderr)
+    if sys_exit:
+        sys.exit(rc)
+    return rc
+
 # vi: ts=4 expandtab
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 684822c2..4316b5d9 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -1,6 +1,8 @@
 import os
 import sys
 
+from cloudinit import version
+
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -9,8 +11,6 @@ sys.path.insert(0, os.path.abspath('../'))
 sys.path.insert(0, os.path.abspath('./'))
 sys.path.insert(0, os.path.abspath('.'))
 
-from cloudinit import version
-from cloudinit.config.schema import get_schema_doc
 
 # Supress warnings for docs that aren't used yet
 # unused_docs = [
@@ -66,12 +66,3 @@ html_theme = 'sphinx_rtd_theme'
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
 
html_logo = 'static/logo.png' - -def generate_docstring_from_schema(app, what, name, obj, options, lines): - """Override module docs from schema when present.""" - if what == 'module' and hasattr(obj, "schema"): - del lines[:] - lines.extend(get_schema_doc(obj.schema).split('\n')) - -def setup(app): - app.connect('autodoc-process-docstring', generate_docstring_from_schema) diff --git a/tests/unittests/cmd/test_clean.py b/tests/unittests/cmd/test_clean.py index 81fc930e..3bb0ee9b 100644 --- a/tests/unittests/cmd/test_clean.py +++ b/tests/unittests/cmd/test_clean.py @@ -137,7 +137,7 @@ class TestClean(CiTestCase): clean.remove_artifacts, remove_logs=False) self.assertEqual(1, retcode) self.assertEqual( - 'ERROR: Could not remove %s/dir1: oops\n' % self.artifact_dir, + 'Error:\nCould not remove %s/dir1: oops\n' % self.artifact_dir, m_stderr.getvalue()) def test_handle_clean_args_reboots(self): diff --git a/tests/unittests/cmd/test_cloud_id.py b/tests/unittests/cmd/test_cloud_id.py index 12fc80e8..9a010402 100644 --- a/tests/unittests/cmd/test_cloud_id.py +++ b/tests/unittests/cmd/test_cloud_id.py @@ -51,7 +51,7 @@ class TestCloudId(CiTestCase): cloud_id.main() self.assertEqual(1, context_manager.exception.code) self.assertIn( - "ERROR: File not found '%s'" % self.instance_data, + "Error:\nFile not found '%s'" % self.instance_data, m_stderr.getvalue()) def test_cloud_id_non_json_instance_data(self): @@ -64,7 +64,7 @@ class TestCloudId(CiTestCase): cloud_id.main() self.assertEqual(1, context_manager.exception.code) self.assertIn( - "ERROR: File '%s' is not valid json." % self.instance_data, + "Error:\nFile '%s' is not valid json." % self.instance_data, m_stderr.getvalue()) def test_cloud_id_from_cloud_name_in_instance_data(self): diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index b01f5eea..f90e0f62 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -1,13 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
-import cloudinit
-from cloudinit.config.schema import (
-    CLOUD_CONFIG_HEADER, SchemaValidationError, annotated_cloudconfig_file,
-    get_schema_doc, get_schema, validate_cloudconfig_file,
-    validate_cloudconfig_schema, main)
-from cloudinit.util import write_file
-from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+import importlib
+import sys
+import inspect
+import logging
 
 from copy import copy
 import itertools
 import pytest
@@ -15,6 +12,63 @@ from pathlib import Path
 from textwrap import dedent
 from yaml import safe_load
 
+import cloudinit
+from cloudinit.config.schema import (
+    CLOUD_CONFIG_HEADER,
+    SchemaValidationError,
+    annotated_cloudconfig_file,
+    get_meta_doc,
+    get_schema,
+    get_jsonschema_validator,
+    validate_cloudconfig_file,
+    validate_cloudconfig_metaschema,
+    validate_cloudconfig_schema,
+    main,
+    MetaSchema,
+)
+from cloudinit.util import write_file
+from tests.unittests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+
+
+def get_schemas() -> dict:
+    """Return all module schemas
+
+    Assumes that module schemas have the variable name "schema"
+    """
+    return get_module_variable("schema")
+
+
+def get_metas() -> dict:
+    """Return all module metas
+
+    Assumes that module metas have the variable name "meta"
+    """
+    return get_module_variable("meta")
+
+
+def get_module_variable(var_name) -> dict:
+    """Inspect modules and get variable from module matching var_name"""
+    schemas = {}
+
+    files = list(Path("../../cloudinit/config/").glob("cc_*.py"))
+    modules = [mod.stem for mod in files]
+
+    for module in modules:
+        importlib.import_module("cloudinit.config.{}".format(module))
+
+    for k, v in sys.modules.items():
+        path = Path(k)
+
+        if "cloudinit.config" == path.stem and path.suffix[1:4] == "cc_":
+            module_name = path.suffix[1:]
+            members = inspect.getmembers(v)
+            schemas[module_name] = None
+            for name, value in members:
+                if name == var_name:
+                    schemas[module_name] = value
+                    break
+    return schemas
+
 
 class GetSchemaTest(CiTestCase):
 
@@ -34,25 +88,17 @@ class GetSchemaTest(CiTestCase):
             'cc_ubuntu_advantage',
             'cc_ubuntu_drivers',
             'cc_write_files',
-            'cc_write_files_deferred',
             'cc_zypper_add_repo',
             'cc_chef',
             'cc_install_hotplug',
         ],
-            [subschema['id'] for subschema in schema['allOf']])
-        self.assertEqual('cloud-config-schema', schema['id'])
+            [meta["id"] for meta in get_metas().values() if meta is not None],
+        )
+        self.assertEqual("cloud-config-schema", schema["id"])
         self.assertEqual(
-            'http://json-schema.org/draft-04/schema#',
-            schema['$schema'])
-        # FULL_SCHEMA is updated by the get_schema call
-        from cloudinit.config.schema import FULL_SCHEMA
-        self.assertCountEqual(['id', '$schema', 'allOf'], FULL_SCHEMA.keys())
-
-    def test_get_schema_returns_global_when_set(self):
-        """When FULL_SCHEMA global is already set, get_schema returns it."""
-        m_schema_path = 'cloudinit.config.schema.FULL_SCHEMA'
-        with mock.patch(m_schema_path, {'here': 'iam'}):
-            self.assertEqual({'here': 'iam'}, get_schema())
+            "http://json-schema.org/draft-04/schema#", schema["$schema"]
+        )
+        self.assertCountEqual(["id", "$schema", "allOf"], get_schema().keys())
 
 
 class SchemaValidationErrorTest(CiTestCase):
@@ -93,8 +139,9 @@ class ValidateCloudConfigSchemaTest(CiTestCase):
         with mock.patch.dict('sys.modules', **{'jsonschema': ImportError()}):
             validate_cloudconfig_schema({'p1': -1}, schema, strict=True)
         self.assertIn(
-            'Ignoring schema validation. python-jsonschema is not present',
-            self.logs.getvalue())
+            "Ignoring schema validation. jsonschema is not present",
+            self.logs.getvalue(),
+        )
 
     @skipUnlessJsonSchema()
     def test_validateconfig_schema_strict_raises_errors(self):
@@ -117,14 +164,48 @@ class ValidateCloudConfigSchemaTest(CiTestCase):
             "Cloud config schema errors: p1: '-1' is not a 'email'",
             str(context_mgr.exception))
 
+    @skipUnlessJsonSchema()
+    def test_validateconfig_schema_honors_formats_strict_metaschema(self):
+        """With strict=True and strict_metaschema=True, ensure errors on
+        format.
+        """
+        schema = {"properties": {"p1": {"type": "string", "format": "email"}}}
+        with self.assertRaises(SchemaValidationError) as context_mgr:
+            validate_cloudconfig_schema(
+                {"p1": "-1"}, schema, strict=True, strict_metaschema=True
+            )
+        self.assertEqual(
+            "Cloud config schema errors: p1: '-1' is not a 'email'",
+            str(context_mgr.exception),
+        )
+
+    @skipUnlessJsonSchema()
+    def test_validateconfig_strict_metaschema_do_not_raise_exception(self):
+        """With strict_metaschema=True, do not raise exceptions.
+
+        This flag is currently unused, but is intended for run-time validation.
+        This should warn, but not raise.
+        """
+        schema = {"properties": {"p1": {"types": "string", "format": "email"}}}
+        validate_cloudconfig_schema(
+            {"p1": "-1"}, schema, strict_metaschema=True
+        )
+        assert (
+            "Meta-schema validation failed, attempting to validate config"
+            in self.logs.getvalue()
+        )
+
 
 class TestCloudConfigExamples:
-    schema = get_schema()
+    schema = get_schemas()
+    metas = get_metas()
     params = [
-        (schema["id"], example)
-        for schema in schema["allOf"] for example in schema["examples"]]
+        (meta["id"], example)
+        for meta in metas.values()
+        if meta and meta.get("examples")
+        for example in meta.get("examples")
+    ]
 
-    @pytest.mark.parametrize("schema_id,example", params)
+    @pytest.mark.parametrize("schema_id, example", params)
     @skipUnlessJsonSchema()
     def test_validateconfig_schema_of_example(self, schema_id, example):
         """ For a given example in a config module we test if it is valid
@@ -201,22 +282,42 @@ class ValidateCloudConfigFileTest(CiTestCase):
 
 
 class GetSchemaDocTest(CiTestCase):
-    """Tests for get_schema_doc."""
+    """Tests for get_meta_doc."""
 
     def setUp(self):
         super(GetSchemaDocTest, self).setUp()
         self.required_schema = {
-            'title': 'title', 'description': 'description', 'id': 'id',
-            'name': 'name', 'frequency': 'frequency',
-            'distros': ['debian', 'rhel']}
+            "title": "title",
+            "description": "description",
+            "id": "id",
+            "name": "name",
+            "frequency": "frequency",
+            "distros": ["debian", "rhel"],
+        }
+        self.meta = MetaSchema(
+            {
+                "title": "title",
+                "description": "description",
+                "id": "id",
+                "name": "name",
+                "frequency": "frequency",
+                "distros": ["debian", "rhel"],
+                "examples": [
+                    'ex1:\n    [don\'t, expand, "this"]',
+                    "ex2: true",
+                ],
+            }
+        )
 
-    def test_get_schema_doc_returns_restructured_text(self):
-        """get_schema_doc returns restructured text for a cloudinit schema."""
+    def test_get_meta_doc_returns_restructured_text(self):
+        """get_meta_doc returns restructured text for a cloudinit schema."""
         full_schema = copy(self.required_schema)
         full_schema.update(
             {'properties': {
                 'prop1': {'type': 'array', 'description': 'prop-description',
                           'items': {'type': 'integer'}}}})
+
+        doc = get_meta_doc(self.meta, full_schema)
         self.assertEqual(
             dedent("""
                 name
@@ -232,47 +333,51 @@ class GetSchemaDocTest(CiTestCase):
 
                 **Supported distros:** debian, rhel
 
                 **Config schema**:
-                    **prop1:** (array of integer) prop-description\n\n"""),
-            get_schema_doc(full_schema))
+                    **prop1:** (array of integer) prop-description
 
-    def 
test_get_schema_doc_handles_multiple_types(self): - """get_schema_doc delimits multiple property types with a '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': ['string', 'integer'], - 'description': 'prop-description'}}}) + **Examples**:: + + ex1: + [don't, expand, "this"] + # --- Example2 --- + ex2: true + """), + doc, + ) + + def test_get_meta_doc_handles_multiple_types(self): + """get_meta_doc delimits multiple property types with a '/'.""" + schema = {"properties": {"prop1": {"type": ["string", "integer"]}}} self.assertIn( - '**prop1:** (string/integer) prop-description', - get_schema_doc(full_schema)) + "**prop1:** (string/integer)", get_meta_doc(self.meta, schema) + ) - def test_get_schema_doc_handles_enum_types(self): - """get_schema_doc converts enum types to yaml and delimits with '/'.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'enum': [True, False, 'stuff'], - 'description': 'prop-description'}}}) + def test_get_meta_doc_handles_enum_types(self): + """get_meta_doc converts enum types to yaml and delimits with '/'.""" + schema = {"properties": {"prop1": {"enum": [True, False, "stuff"]}}} self.assertIn( - '**prop1:** (true/false/stuff) prop-description', - get_schema_doc(full_schema)) + "**prop1:** (true/false/stuff)", get_meta_doc(self.meta, schema) + ) - def test_get_schema_doc_handles_nested_oneof_property_types(self): - """get_schema_doc describes array items oneOf declarations in type.""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'prop1': {'type': 'array', - 'items': { - 'oneOf': [{'type': 'string'}, - {'type': 'integer'}]}, - 'description': 'prop-description'}}}) + def test_get_meta_doc_handles_nested_oneof_property_types(self): + """get_meta_doc describes array items oneOf declarations in type.""" + schema = { + "properties": { + "prop1": { + "type": "array", + "items": { + "oneOf": [{"type": "string"}, {"type": "integer"}] + }, + } + } + } self.assertIn( - '**prop1:** (array of (string)/(integer)) prop-description', - get_schema_doc(full_schema)) + "**prop1:** (array of (string)/(integer))", + get_meta_doc(self.meta, schema), + ) - def test_get_schema_doc_handles_string_examples(self): - """get_schema_doc properly indented examples as a list of strings.""" + def test_get_meta_doc_handles_string_examples(self): + """get_meta_doc properly indented examples as a list of strings.""" full_schema = copy(self.required_schema) full_schema.update( {'examples': ['ex1:\n [don\'t, expand, "this"]', 'ex2: true'], @@ -291,16 +396,17 @@ class GetSchemaDocTest(CiTestCase): # --- Example2 --- ex2: true """), - get_schema_doc(full_schema)) + get_meta_doc(self.meta, full_schema), + ) - def test_get_schema_doc_properly_parse_description(self): - """get_schema_doc description properly formatted""" - full_schema = copy(self.required_schema) - full_schema.update( - {'properties': { - 'p1': { - 'type': 'string', - 'description': dedent("""\ + def test_get_meta_doc_properly_parse_description(self): + """get_meta_doc description properly formatted""" + schema = { + "properties": { + "p1": { + "type": "string", + "description": dedent( + """\ This item has the following options: @@ -312,8 +418,8 @@ class GetSchemaDocTest(CiTestCase): The default value is option1""") } - }} - ) + } + } self.assertIn( dedent(""" @@ -325,16 +431,28 @@ class GetSchemaDocTest(CiTestCase): - option3 The default value is option1 + """), - 
get_schema_doc(full_schema)) + get_meta_doc(self.meta, schema), + ) - def test_get_schema_doc_raises_key_errors(self): - """get_schema_doc raises KeyErrors on missing keys.""" - for key in self.required_schema: - invalid_schema = copy(self.required_schema) - invalid_schema.pop(key) + def test_get_meta_doc_raises_key_errors(self): + """get_meta_doc raises KeyErrors on missing keys.""" + schema = { + "properties": { + "prop1": { + "type": "array", + "items": { + "oneOf": [{"type": "string"}, {"type": "integer"}] + }, + } + } + } + for key in self.meta: + invalid_meta = copy(self.meta) + invalid_meta.pop(key) with self.assertRaises(KeyError) as context_mgr: - get_schema_doc(invalid_schema) + get_meta_doc(invalid_meta, schema) self.assertIn(key, str(context_mgr.exception)) @@ -418,6 +536,7 @@ class TestMain: _out, err = capsys.readouterr() expected = ( + 'Error:\n' 'Expected one of --config-file, --system or --docs arguments\n' ) assert expected == err @@ -431,6 +550,7 @@ class TestMain: _out, err = capsys.readouterr() expected = ( + 'Error:\n' 'Expected one of --config-file, --system or --docs arguments\n' ) assert expected == err @@ -443,7 +563,7 @@ class TestMain: main() assert 1 == context_manager.value.code _out, err = capsys.readouterr() - assert 'Configfile NOT_A_FILE does not exist\n' == err + assert 'Error:\nConfigfile NOT_A_FILE does not exist\n' == err def test_main_prints_docs(self, capsys): """When --docs parameter is provided, main generates documentation.""" @@ -489,12 +609,13 @@ class TestMain: assert 1 == context_manager.value.code _out, err = capsys.readouterr() expected = ( - 'Unable to read system userdata as non-root user. Try using sudo\n' + 'Error:\nUnable to read system userdata as non-root user. ' + 'Try using sudo\n' ) assert expected == err -def _get_schema_doc_examples(): +def _get_meta_doc_examples(): examples_dir = Path( cloudinit.__file__).parent.parent / 'doc' / 'examples' assert examples_dir.is_dir() @@ -507,9 +628,49 @@ def _get_schema_doc_examples(): class TestSchemaDocExamples: schema = get_schema() - @pytest.mark.parametrize("example_path", _get_schema_doc_examples()) + @pytest.mark.parametrize("example_path", _get_meta_doc_examples()) @skipUnlessJsonSchema() def test_schema_doc_examples(self, example_path): validate_cloudconfig_file(str(example_path), self.schema) + +class TestStrictMetaschema: + """Validate that schemas follow a stricter metaschema definition than + the default. This disallows arbitrary key/value pairs. 
+ """ + + @skipUnlessJsonSchema() + def test_modules(self): + """Validate all modules with a stricter metaschema""" + (validator, _) = get_jsonschema_validator() + for (name, value) in get_schemas().items(): + if value: + validate_cloudconfig_metaschema(validator, value) + else: + logging.warning("module %s has no schema definition", name) + + @skipUnlessJsonSchema() + def test_validate_bad_module(self): + """Throw exception by default, don't throw if throw=False + + item should be 'items' and is therefore interpreted as an additional + property which is invalid with a strict metaschema + """ + (validator, _) = get_jsonschema_validator() + schema = { + "type": "array", + "item": { + "type": "object", + }, + } + with pytest.raises( + SchemaValidationError, + match=(r"Additional properties are not allowed.*") + ): + + validate_cloudconfig_metaschema(validator, schema) + + validate_cloudconfig_metaschema(validator, schema, throw=False) + + # vi: ts=4 expandtab syntax=python diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index fd717f34..d0162673 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -1,6 +1,7 @@ # This file is part of cloud-init. See LICENSE file for license information. import os +import contextlib import io from collections import namedtuple @@ -214,26 +215,106 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): self.assertEqual(1, exit_code) # Known whitebox output from schema subcommand self.assertEqual( + 'Error:\n' 'Expected one of --config-file, --system or --docs arguments\n', self.stderr.getvalue()) - def test_wb_devel_schema_subcommand_doc_content(self): - """Validate that doc content is sane from known examples.""" + def test_wb_devel_schema_subcommand_doc_all_spot_check(self): + """Validate that doc content has correct values from known examples. + + Ensure that schema doc is returned + """ + + # Note: patchStdoutAndStderr() is convenient for reducing boilerplate, + # but inspecting the code for debugging is not ideal + # contextlib.redirect_stdout() provides similar behavior as a context + # manager stdout = io.StringIO() - self.patchStdoutAndStderr(stdout=stdout) - self._call_main(['cloud-init', 'devel', 'schema', '--docs', 'all']) - expected_doc_sections = [ - '**Supported distros:** all', - ('**Supported distros:** almalinux, alpine, centos, cloudlinux, ' - 'debian, eurolinux, fedora, openEuler, opensuse, photon, rhel, ' - 'rocky, sles, ubuntu, virtuozzo'), - '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', - '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' - ] + with contextlib.redirect_stdout(stdout): + self._call_main(["cloud-init", "devel", "schema", "--docs", "all"]) + expected_doc_sections = [ + "**Supported distros:** all", + ( + "**Supported distros:** almalinux, alpine, centos, " + "cloudlinux, debian, eurolinux, fedora, openEuler, " + "opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo" + ), + "**Config schema**:\n **resize_rootfs:** " + "(true/false/noblock)", + "**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n", + ] + stdout = stdout.getvalue() + for expected in expected_doc_sections: + self.assertIn(expected, stdout) + + def test_wb_devel_schema_subcommand_single_spot_check(self): + """Validate that doc content has correct values from known example. 
+
+        Validate a single module arg
+        """
+
+        # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+        # but inspecting the code for debugging is not ideal
+        # contextlib.redirect_stdout() provides similar behavior as a context
+        # manager
+        stdout = io.StringIO()
+        with contextlib.redirect_stdout(stdout):
+            self._call_main(
+                ["cloud-init", "devel", "schema", "--docs", "cc_runcmd"]
+            )
+        expected_doc_sections = [
+            "Runcmd\n------\n**Summary:** Run arbitrary commands"
+        ]
         stdout = stdout.getvalue()
         for expected in expected_doc_sections:
             self.assertIn(expected, stdout)
 
+    def test_wb_devel_schema_subcommand_multiple_spot_check(self):
+        """Validate that doc content has correct values from known example.
+
+        Validate multiple module args
+        """
+
+        stdout = io.StringIO()
+        with contextlib.redirect_stdout(stdout):
+            self._call_main(
+                [
+                    "cloud-init",
+                    "devel",
+                    "schema",
+                    "--docs",
+                    "cc_runcmd",
+                    "cc_resizefs",
+                ]
+            )
+        expected_doc_sections = [
+            "Runcmd\n------\n**Summary:** Run arbitrary commands",
+            "Resizefs\n--------\n**Summary:** Resize filesystem",
+        ]
+        stdout = stdout.getvalue()
+        for expected in expected_doc_sections:
+            self.assertIn(expected, stdout)
+
+    def test_wb_devel_schema_subcommand_bad_arg_fails(self):
+        """Validate that an invalid --docs value fails with a clear error."""
+
+        # Note: patchStdoutAndStderr() is convenient for reducing boilerplate,
+        # but inspecting the code for debugging is not ideal
+        # contextlib.redirect_stdout() provides similar behavior as a context
+        # manager
+        stderr = io.StringIO()
+        with contextlib.redirect_stderr(stderr):
+            self._call_main(
+                ["cloud-init", "devel", "schema", "--docs", "garbage_value"]
+            )
+        expected_doc_sections = ["Invalid --docs value"]
+        stderr = stderr.getvalue()
+        for expected in expected_doc_sections:
+            self.assertIn(expected, stderr)
+
     @mock.patch('cloudinit.cmd.main.main_single')
     def test_single_subcommand(self, m_main_single):
         """The subcommand 'single' calls main_single with valid args."""
-- 
cgit v1.2.3


From 0ed00ad9cdebc2d4dabd8bd6d7c901584963def5 Mon Sep 17 00:00:00 2001
From: Haruki TSURUMOTO
Date: Tue, 7 Dec 2021 23:23:45 +0900
Subject: Add miraclelinux support (#1128)

---
 README.md                           |  2 +-
 cloudinit/config/cc_ntp.py          |  4 ++--
 cloudinit/config/cc_yum_add_repo.py |  2 +-
 cloudinit/distros/__init__.py       |  3 ++-
 cloudinit/distros/miraclelinux.py   |  8 ++++++++
 cloudinit/net/sysconfig.py          |  3 ++-
 cloudinit/util.py                   |  4 ++--
 config/cloud.cfg.tmpl               |  6 +++---
 systemd/cloud-init-generator.tmpl   |  2 +-
 systemd/cloud-init.service.tmpl     |  2 +-
 tests/unittests/test_cli.py         |  5 +++--
 tests/unittests/test_util.py        | 38 +++++++++++++++++++++++++++++++++++++
 tools/.github-cla-signers           |  1 +
 tools/read-dependencies             |  4 ++++
 tools/render-cloudcfg               |  2 +-
 15 files changed, 70 insertions(+), 16 deletions(-)
 create mode 100644 cloudinit/distros/miraclelinux.py

(limited to 'cloudinit')

diff --git a/README.md b/README.md
index 27098b11..f2a745f8 100644
--- a/README.md
+++ b/README.md
@@ -39,7 +39,7 @@ get in contact with that distribution and send them our way!
 
 | Supported OSes | Supported Public Clouds | Supported Private Clouds |
 | --- | --- | --- |
-| Alpine Linux<br />
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
openEuler
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)
VMware















| +| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
openEuler
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux/CloudLinux/MIRACLE LINUX
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)
VMware















| ## To start developing cloud-init diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index 9c085a04..c55d5d86 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -25,8 +25,8 @@ frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian', - 'eurolinux', 'fedora', 'openEuler', 'opensuse', 'photon', - 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] + 'eurolinux', 'fedora', 'miraclelinux', 'openEuler', 'opensuse', + 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] NTP_CLIENT_CONFIG = { 'chrony': { diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index d66d3ae4..046a2852 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -19,7 +19,7 @@ entry, the config entry will be skipped. **Module frequency:** always **Supported distros:** almalinux, centos, cloudlinux, eurolinux, fedora, - openEuler, photon, rhel, rocky, virtuozzo + miraclelinux, openEuler, photon, rhel, rocky, virtuozzo **Config keys**:: diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index fe44f20e..742804ea 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -50,7 +50,8 @@ OSFAMILIES = { 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], 'redhat': ['almalinux', 'amazon', 'centos', 'cloudlinux', 'eurolinux', - 'fedora', 'openEuler', 'photon', 'rhel', 'rocky', 'virtuozzo'], + 'fedora', 'miraclelinux', 'openEuler', 'photon', 'rhel', + 'rocky', 'virtuozzo'], 'suse': ['opensuse', 'sles'], } diff --git a/cloudinit/distros/miraclelinux.py b/cloudinit/distros/miraclelinux.py new file mode 100644 index 00000000..c7753387 --- /dev/null +++ b/cloudinit/distros/miraclelinux.py @@ -0,0 +1,8 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.distros import rhel + + +class Distro(rhel.Distro): + pass +# vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index ef4543b4..85342219 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -19,7 +19,8 @@ from .network_state import ( LOG = logging.getLogger(__name__) KNOWN_DISTROS = ['almalinux', 'centos', 'cloudlinux', 'eurolinux', 'fedora', - 'openEuler', 'rhel', 'rocky', 'suse', 'virtuozzo'] + 'miraclelinux', 'openEuler', 'rhel', 'rocky', 'suse', + 'virtuozzo'] NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" diff --git a/cloudinit/util.py b/cloudinit/util.py index 1b462a38..cad087a1 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -540,8 +540,8 @@ def _get_variant(info): linux_dist = info['dist'][0].lower() if linux_dist in ( 'almalinux', 'alpine', 'arch', 'centos', 'cloudlinux', - 'debian', 'eurolinux', 'fedora', 'openeuler', 'photon', - 'rhel', 'rocky', 'suse', 'virtuozzo'): + 'debian', 'eurolinux', 'fedora', 'miraclelinux', 'openeuler', + 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'): variant = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): variant = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index b66bbe60..741b23d5 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -33,7 +33,7 @@ disable_root: true {% endif %} {% if variant in ["almalinux", "alpine", "amazon", "centos", "cloudlinux", "eurolinux", - "fedora", "openEuler", "rhel", "rocky", "virtuozzo"] %} + "fedora", "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} resize_rootfs: noblock @@ -176,7 +176,7 @@ cloud_final_modules: system_info: # This will affect which distro class gets used {% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian", - "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "openEuler", + "eurolinux", "fedora", "freebsd", "netbsd", "miraclelinux", "openbsd", "openEuler", "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %} distro: {{ variant }} {% elif variant in ["dragonfly"] %} @@ -231,7 +231,7 @@ system_info: security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh {% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "eurolinux", - "fedora", "openEuler", "rhel", "rocky", "suse", "virtuozzo"] %} + "fedora", "miraclelinux", "openEuler", "rhel", "rocky", "suse", "virtuozzo"] %} # Default user name + that default users groups (if added/used) default_user: {% if variant == "amazon" %} diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 7d1e7256..74d47428 100644 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -84,7 +84,7 @@ default() { check_for_datasource() { local ds_rc="" {% if variant in ["almalinux", "centos", "cloudlinux", "eurolinux", "fedora", - "openEuler", "rhel", "rocky", "virtuozzo"] %} + "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} local dsidentify="/usr/lib/cloud-init/ds-identify" diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index de3f3d91..e71e5679 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -13,7 +13,7 @@ After=systemd-networkd-wait-online.service After=networking.service {% endif %} {% if variant in ["almalinux", "centos", 
"cloudlinux", "eurolinux", "fedora", - "openEuler", "rhel", "rocky", "virtuozzo"] %} + "miraclelinux", "openEuler", "rhel", "rocky", "virtuozzo"] %} After=network.service After=NetworkManager.service {% endif %} diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index d0162673..e30e89a7 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -236,8 +236,9 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): "**Supported distros:** all", ( "**Supported distros:** almalinux, alpine, centos, " - "cloudlinux, debian, eurolinux, fedora, openEuler, " - "opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo" + "cloudlinux, debian, eurolinux, fedora, miraclelinux, " + "openEuler, opensuse, photon, rhel, rocky, sles, ubuntu, " + "virtuozzo" ), "**Config schema**:\n **resize_rootfs:** " "(true/false/noblock)", diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 1290cbc6..3b76ead8 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -185,6 +185,25 @@ OS_RELEASE_EUROLINUX_8 = dedent( """ ) +OS_RELEASE_MIRACLELINUX_8 = dedent( + """\ + NAME="MIRACLE LINUX" + VERSION="8.4 (Peony)" + ID="miraclelinux" + ID_LIKE="rhel fedora" + PLATFORM_ID="platform:el8" + VERSION_ID="8" + PRETTY_NAME="MIRACLE LINUX 8.4 (Peony)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:cybertrust_japan:miracle_linux:8" + HOME_URL="https://www.cybertrust.co.jp/miracle-linux/" + DOCUMENTATION_URL="https://www.miraclelinux.com/support/miraclelinux8" + BUG_REPORT_URL="https://bugzilla.asianux.com/" + MIRACLELINUX_SUPPORT_PRODUCT="MIRACLE LINUX" + MIRACLELINUX_SUPPORT_PRODUCT_VERSION="8" +""" +) + OS_RELEASE_ROCKY_8 = dedent( """\ NAME="Rocky Linux" @@ -255,6 +274,7 @@ REDHAT_RELEASE_REDHAT_7 = "Red Hat Enterprise Linux Server release 7.5 (Maipo)" REDHAT_RELEASE_ALMALINUX_8 = "AlmaLinux release 8.3 (Purple Manul)" REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)" REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)" +REDHAT_RELEASE_MIRACLELINUX_8 = "MIRACLE LINUX release 8.4 (Peony)" REDHAT_RELEASE_ROCKY_8 = "Rocky Linux release 8.3 (Green Obsidian)" REDHAT_RELEASE_VIRTUOZZO_8 = "Virtuozzo Linux release 8" REDHAT_RELEASE_CLOUDLINUX_8 = "CloudLinux release 8.4 (Valery Rozhdestvensky)" @@ -754,6 +774,24 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_miraclelinux8_rhrelease(self, m_os_release, + m_path_exists): + """Verify miraclelinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_MIRACLELINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('miracle', '8.4', 'Peony'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_miraclelinux8_osrelease(self, m_os_release, + m_path_exists): + """Verify miraclelinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_MIRACLELINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('miraclelinux', '8', 'Peony'), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): """Verify rocky linux 8 read from redhat-release.""" diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 492ed15e..a2da8a62 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ 
-73,6 +73,7 @@ timothegenzmer
 tnt-dev
 tomponline
 tsanghan
+tSU-RooT
 vteratipally
 Vultaire
 WebSpider
diff --git a/tools/read-dependencies b/tools/read-dependencies
index 810154e4..efa5879c 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -24,6 +24,7 @@ DEFAULT_REQUIREMENTS = 'requirements.txt'
 DISTRO_PKG_TYPE_MAP = {
     'centos': 'redhat',
     'eurolinux': 'redhat',
+    'miraclelinux': 'redhat',
     'rocky': 'redhat',
     'redhat': 'redhat',
     'debian': 'debian',
@@ -70,12 +71,14 @@ DRY_DISTRO_INSTALL_PKG_CMD = {
     'rocky': ['yum', 'install', '--assumeyes'],
     'centos': ['yum', 'install', '--assumeyes'],
     'eurolinux': ['yum', 'install', '--assumeyes'],
+    'miraclelinux': ['yum', 'install', '--assumeyes'],
     'redhat': ['yum', 'install', '--assumeyes'],
 }
 
 DISTRO_INSTALL_PKG_CMD = {
     'rocky': MAYBE_RELIABLE_YUM_INSTALL,
     'eurolinux': MAYBE_RELIABLE_YUM_INSTALL,
+    'miraclelinux': MAYBE_RELIABLE_YUM_INSTALL,
     'centos': MAYBE_RELIABLE_YUM_INSTALL,
     'redhat': MAYBE_RELIABLE_YUM_INSTALL,
     'debian': ['apt', 'install', '-y'],
@@ -89,6 +92,7 @@ DISTRO_INSTALL_PKG_CMD = {
 CI_SYSTEM_BASE_PKGS = {
     'common': ['make', 'sudo', 'tar'],
     'eurolinux': ['python3-tox'],
+    'miraclelinux': ['python3-tox'],
     'redhat': ['python3-tox'],
     'centos': ['python3-tox'],
     'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index 186d61b7..6642bd58 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -5,7 +5,7 @@ import os
 import sys
 
 VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "cloudlinux", "debian",
-            "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "openEuler", "photon",
+            "eurolinux", "fedora", "freebsd", "miraclelinux", "netbsd", "openbsd", "openEuler", "photon",
             "rhel", "suse","rocky", "ubuntu", "unknown", "virtuozzo"]
 
-- 
cgit v1.2.3


From b591e9dba6c85f3934bc309032c3e436b8dcb3ac Mon Sep 17 00:00:00 2001
From: Ksenija Stanojevic
Date: Thu, 9 Dec 2021 14:45:37 -0800
Subject: Improve error log message when mount failed (#1140)

---
 cloudinit/util.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index cad087a1..d7208f11 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1752,8 +1752,10 @@ def mount_cb(device, callback, data=None, mtype=None,
                 mountpoint = tmpd
                 break
             except (IOError, OSError) as exc:
-                LOG.debug("Failed mount of '%s' as '%s': %s",
-                          device, mtype, exc)
+                LOG.debug("Failed to mount device: '%s' with type: '%s' "
+                          "using mount command: '%s', "
+                          "which caused exception: %s",
+                          device, mtype, ' '.join(mountcmd), exc)
                 failure_reason = exc
     if not mountpoint:
         raise MountFailedError("Failed mounting %s to %s due to: %s" %
-- 
cgit v1.2.3


From 24739592217e5ba91e09e8c28b852d31a2c0cc77 Mon Sep 17 00:00:00 2001
From: Gonéri Le Bouder
Date: Thu, 9 Dec 2021 17:46:27 -0500
Subject: find_devs/openbsd: accept ISO on disk (#1132)

When the metadata is an ISO image and is exposed through a disk, the
device is called `/dev/sd?a` internally. For instance `/dev/sd1a`. It
can then be mounted with `mount_cd9660 /dev/sd1a /mnt`.

Metadata in the FAT32 format is exposed as `/dev/sd?i`.

With this change, we try to mount `/dev/sd?a` in addition to
`/dev/sd?i`.
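
As an editor's illustration (not part of the patch): given the
`sysctl -n hw.disknames` output used in the updated test fixture below,
the device-name expansion behaves roughly as sketched here. The helper
name and its standalone form are illustrative only; the real logic
lives inside `find_devs_with_openbsd()`.

    # Minimal sketch, reconstructed from the hunk and test fixture below.
    def expand_disknames(disknames):
        devlist = []
        for entry in disknames.rstrip().split(','):
            if not entry.endswith(':'):
                # e.g. 'sd0:630d98d32b5d3759' carries a DUID: a regular
                # attached disk, not a config-drive candidate.
                continue
            if entry == 'fd0:':
                continue  # skip the floppy device
            devlist.append(entry[:-1] + 'a')  # ISO9660 sits on the 'a' slice
            if not entry.startswith('cd'):
                devlist.append(entry[:-1] + 'i')  # FAT32 sits on 'i'
        return ['/dev/' + dev for dev in devlist]

    expand_disknames('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:')
    # -> ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i']
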
Closes: https://github.com/ContainerCraft/kmi/issues/12
---
 cloudinit/util.py            | 5 +++--
 tests/unittests/test_util.py | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index d7208f11..b9c584d1 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1217,8 +1217,9 @@ def find_devs_with_openbsd(criteria=None, oformat='device',
             continue
         if entry == 'fd0:':
             continue
-        part_id = 'a' if entry.startswith('cd') else 'i'
-        devlist.append(entry[:-1] + part_id)
+        devlist.append(entry[:-1] + 'a')
+        if not entry.startswith('cd'):
+            devlist.append(entry[:-1] + 'i')
     if criteria == "TYPE=iso9660":
         devlist = [i for i in devlist if i.startswith('cd')]
     elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index eab374bc..c551835f 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -2330,7 +2330,7 @@ class TestFindDevs:
     def test_find_devs_with_openbsd(self, m_subp):
         m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '')
         devlist = util.find_devs_with_openbsd()
-        assert devlist == ['/dev/cd0a', '/dev/sd1i']
+        assert devlist == ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i']
 
     @mock.patch('cloudinit.subp.subp')
     def test_find_devs_with_openbsd_with_criteria(self, m_subp):
@@ -2340,7 +2340,7 @@ class TestFindDevs:
         # lp: #1841466
         devlist = util.find_devs_with_openbsd(
             criteria="LABEL_FATBOOT=A_LABEL")
-        assert devlist == ['/dev/cd0a', '/dev/sd1i']
+        assert devlist == ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i']
 
     @pytest.mark.parametrize(
         'criteria,expected_devlist',
-- 
cgit v1.2.3


From e9634266ea52bf184727fb0782d5dc35f9ed1468 Mon Sep 17 00:00:00 2001
From: Chris Patterson
Date: Fri, 10 Dec 2021 12:16:16 -0500
Subject: sources/azure: remove unnecessary hostname bounce (#1143)

Thanks to [1], the hostname is set prior to network bring-up.

The Azure data source has been bouncing the hostname during setup(),
which occurs after the hostname has already been properly configured.

Note that this doesn't prevent leaking the image's hostname during
Azure's _get_data() when it brings up ephemeral DHCP. However, as we
are not guaranteed to have the hostname metadata available from a
truly "local" source, this behavior is to be expected unless we
disable `send host-name` in the dhclient config.
[1]: https://github.com/canonical/cloud-init/commit/133ad2cb327ad17b7b81319fac8f9f14577c04df Signed-off-by: Chris Patterson --- cloudinit/sources/DataSourceAzure.py | 126 -------------- doc/examples/cloud-config-datasources.txt | 6 - doc/rtd/topics/datasources/azure.rst | 20 --- tests/unittests/sources/test_azure.py | 263 ------------------------------ 4 files changed, 415 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6c1bc085..eee98fa8 100755 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -6,7 +6,6 @@ import base64 from collections import namedtuple -import contextlib import crypt from functools import partial import os @@ -52,20 +51,10 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} -BOUNCE_COMMAND_IFUP = [ - 'sh', '-xc', - "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x" -] -BOUNCE_COMMAND_FREEBSD = [ - 'sh', '-xc', - ("i=$interface; x=0; ifconfig down $i || x=$?; " - "ifconfig up $i || x=$?; exit $x") -] # azure systems will always have a resource disk, and 66-azure-ephemeral.rules # ensures that it gets linked to this path. RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' -DEFAULT_PRIMARY_NIC = 'eth0' LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' # DMI chassis-asset-tag is set static for all azure instances @@ -247,7 +236,6 @@ def get_resource_disk_on_freebsd(port_id): # update the FreeBSD specific information if util.is_FreeBSD(): - DEFAULT_PRIMARY_NIC = 'hn0' LEASE_FILE = '/var/db/dhclient.leases.hn0' DEFAULT_FS = 'freebsd-ufs' res_disk = get_resource_disk_on_freebsd(1) @@ -261,13 +249,6 @@ if util.is_FreeBSD(): BUILTIN_DS_CONFIG = { 'data_dir': AGENT_SEED_DIR, - 'set_hostname': True, - 'hostname_bounce': { - 'interface': DEFAULT_PRIMARY_NIC, - 'policy': True, - 'command': 'builtin', - 'hostname_command': 'hostname', - }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, 'dhclient_lease_file': LEASE_FILE, 'apply_network_config': True, # Use IMDS published network configuration @@ -293,46 +274,6 @@ DEF_EPHEMERAL_LABEL = 'Temporary Storage' DEF_PASSWD_REDACTION = 'REDACTED' -def get_hostname(hostname_command='hostname'): - if not isinstance(hostname_command, (list, tuple)): - hostname_command = (hostname_command,) - return subp.subp(hostname_command, capture=True)[0].strip() - - -def set_hostname(hostname, hostname_command='hostname'): - subp.subp([hostname_command, hostname]) - - -@azure_ds_telemetry_reporter -@contextlib.contextmanager -def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): - """ - Set a temporary hostname, restoring the previous hostname on exit. - - Will have the value of the previous hostname when used as a context - manager, or None if the hostname was not changed. 
- """ - policy = cfg['hostname_bounce']['policy'] - previous_hostname = get_hostname(hostname_command) - if (not util.is_true(cfg.get('set_hostname')) or - util.is_false(policy) or - (previous_hostname == temp_hostname and policy != 'force')): - yield None - return - try: - set_hostname(temp_hostname, hostname_command) - except Exception as e: - report_diagnostic_event( - 'Failed setting temporary hostname: %s' % e, - logger_func=LOG.warning) - yield None - return - try: - yield previous_hostname - finally: - set_hostname(previous_hostname, hostname_command) - - class DataSourceAzure(sources.DataSource): dsname = 'Azure' @@ -369,34 +310,6 @@ class DataSourceAzure(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) - @azure_ds_telemetry_reporter - def bounce_network_with_azure_hostname(self): - # When using cloud-init to provision, we have to set the hostname from - # the metadata and "bounce" the network to force DDNS to update via - # dhclient - azure_hostname = self.metadata.get('local-hostname') - LOG.debug("Hostname in metadata is %s", azure_hostname) - hostname_command = self.ds_cfg['hostname_bounce']['hostname_command'] - - with temporary_hostname(azure_hostname, self.ds_cfg, - hostname_command=hostname_command) \ - as previous_hn: - if (previous_hn is not None and - util.is_true(self.ds_cfg.get('set_hostname'))): - cfg = self.ds_cfg['hostname_bounce'] - - # "Bouncing" the network - try: - return perform_hostname_bounce(hostname=azure_hostname, - cfg=cfg, - prev_hostname=previous_hn) - except Exception as e: - report_diagnostic_event( - "Failed publishing hostname: %s" % e, - logger_func=LOG.warning) - util.logexc(LOG, "handling set_hostname failed") - return False - def _get_subplatform(self): """Return the subplatform metadata source details.""" if self.seed.startswith('/dev'): @@ -1502,9 +1415,6 @@ class DataSourceAzure(sources.DataSource): On success, returns a dictionary including 'public_keys'. On failure, returns False. """ - - self.bounce_network_with_azure_hostname() - pubkey_info = None ssh_keys_and_source = self._get_public_ssh_keys_and_source() @@ -1763,42 +1673,6 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, return -@azure_ds_telemetry_reporter -def perform_hostname_bounce(hostname, cfg, prev_hostname): - # set the hostname to 'hostname' if it is not already set to that. - # then, if policy is not off, bounce the interface using command - # Returns True if the network was bounced, False otherwise. - command = cfg['command'] - interface = cfg['interface'] - policy = cfg['policy'] - - msg = ("hostname=%s policy=%s interface=%s" % - (hostname, policy, interface)) - env = os.environ.copy() - env['interface'] = interface - env['hostname'] = hostname - env['old_hostname'] = prev_hostname - - if command == "builtin": - if util.is_FreeBSD(): - command = BOUNCE_COMMAND_FREEBSD - elif subp.which('ifup'): - command = BOUNCE_COMMAND_IFUP - else: - LOG.debug( - "Skipping network bounce: ifupdown utils aren't present.") - # Don't bounce as networkd handles hostname DDNS updates - return False - LOG.debug("pubhname: publishing hostname [%s]", msg) - shell = not isinstance(command, (list, tuple)) - # capture=False, see comments in bug 1202758 and bug 1206164. 
- util.log_time(logfunc=LOG.debug, msg="publishing hostname", - get_uptime=True, func=subp.subp, - kwargs={'args': command, 'shell': shell, 'capture': False, - 'env': env}) - return True - - @azure_ds_telemetry_reporter def write_files(datadir, files, dirmode=None): diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index d1a4d79e..7a8c4284 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -45,12 +45,6 @@ datasource: instance-id: i-87018aed local-hostname: myhost.internal - Azure: - set_hostname: True - hostname_bounce: - interface: eth0 - policy: on # [can be 'on', 'off' or 'force'] - SmartOS: # For KVM guests: # Smart OS datasource works over a serial console interacting with diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index ad9f2236..bc672486 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -60,20 +60,6 @@ The settings that may be configured are: custom DHCP option 245 from Azure fabric. * **disk_aliases**: A dictionary defining which device paths should be interpreted as ephemeral images. See cc_disk_setup module for more info. - * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to - metadata changes. The '``hostname_bounce: command``' entry can be either - the literal string 'builtin' or a command to execute. The command will be - invoked after the hostname is set, and will have the 'interface' in its - environment. If ``set_hostname`` is not true, then ``hostname_bounce`` - will be ignored. An example might be: - - ``command: ["sh", "-c", "killall dhclient; dhclient $interface"]`` - - * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to - metadata changes. Azure will throttle ifup/down in some cases after metadata - has been updated to inform dhcp server about updated hostnames. - * **set_hostname**: Boolean set to True when we want Azure to set the hostname - based on metadata. Configuration for the datasource can also be read from a ``dscfg`` entry in the ``LinuxProvisioningConfigurationSet``. 
Content in @@ -91,12 +77,6 @@ An example configuration with the default values is provided below: dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases disk_aliases: ephemeral0: /dev/disk/cloud/azure_resource - hostname_bounce: - interface: eth0 - command: builtin - policy: true - hostname_command: hostname - set_hostname: true Userdata diff --git a/tests/unittests/sources/test_azure.py b/tests/unittests/sources/test_azure.py index 9728a1e7..ad8be04b 100644 --- a/tests/unittests/sources/test_azure.py +++ b/tests/unittests/sources/test_azure.py @@ -696,9 +696,6 @@ scbus-1 on xpt0 bus 0 self.apply_patches([ (dsaz, 'list_possible_azure_ds', self.m_list_possible_azure_ds), - (dsaz, 'perform_hostname_bounce', mock.MagicMock()), - (dsaz, 'get_hostname', mock.MagicMock()), - (dsaz, 'set_hostname', mock.MagicMock()), (dsaz, '_is_platform_viable', self.m_is_platform_viable), (dsaz, 'get_metadata_from_fabric', @@ -1794,21 +1791,6 @@ scbus-1 on xpt0 bus 0 m_net_get_interfaces.assert_called_with( blacklist_drivers=dsaz.BLACKLIST_DRIVERS) - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_no_args(self, m_subp): - dsaz.get_hostname() - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_string_arg(self, m_subp): - dsaz.get_hostname(hostname_command="hostname") - m_subp.assert_called_once_with(("hostname",), capture=True) - - @mock.patch(MOCKPATH + 'subp.subp', autospec=True) - def test_get_hostname_with_iterable_arg(self, m_subp): - dsaz.get_hostname(hostname_command=("hostname",)) - m_subp.assert_called_once_with(("hostname",), capture=True) - @mock.patch( 'cloudinit.sources.helpers.azure.OpenSSLManager.parse_certificates') def test_get_public_ssh_keys_with_imds(self, m_parse_certificates): @@ -2023,251 +2005,6 @@ scbus-1 on xpt0 bus 0 self.assertEqual(dsrc.userdata_raw, userdataOVF.encode('utf-8')) -class TestAzureBounce(CiTestCase): - - with_logs = True - - def mock_out_azure_moving_parts(self): - - def _load_possible_azure_ds(seed_dir, cache_dir): - yield seed_dir - yield dsaz.DEFAULT_PROVISIONING_ISO_DEV - if cache_dir: - yield cache_dir - - self.patches.enter_context( - mock.patch.object(dsaz.util, 'wait_for_files')) - self.patches.enter_context( - mock.patch.object( - dsaz, 'list_possible_azure_ds', - mock.MagicMock(side_effect=_load_possible_azure_ds))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_fabric', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz, 'get_metadata_from_imds', - mock.MagicMock(return_value={}))) - self.patches.enter_context( - mock.patch.object(dsaz.subp, 'which', lambda x: True)) - self.patches.enter_context(mock.patch.object( - dsaz, '_get_random_seed', return_value='wild')) - - def _dmi_mocks(key): - if key == 'system-uuid': - return 'D0DF4C54-4ECB-4A4B-9954-5BDF3ED5C3B8' - elif key == 'chassis-asset-tag': - return '7783-7084-3265-9085-8269-3286-77' - raise RuntimeError('should not get here') - - self.patches.enter_context( - mock.patch.object(dsaz.dmi, 'read_dmi_data', - mock.MagicMock(side_effect=_dmi_mocks))) - - def setUp(self): - super(TestAzureBounce, self).setUp() - self.tmp = self.tmp_dir() - self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') - self.paths = helpers.Paths( - {'cloud_dir': self.tmp, 'run_dir': self.tmp}) - dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - self.patches = ExitStack() - self.mock_out_azure_moving_parts() - self.get_hostname 
= self.patches.enter_context( - mock.patch.object(dsaz, 'get_hostname')) - self.set_hostname = self.patches.enter_context( - mock.patch.object(dsaz, 'set_hostname')) - self.subp = self.patches.enter_context( - mock.patch(MOCKPATH + 'subp.subp')) - self.find_fallback_nic = self.patches.enter_context( - mock.patch('cloudinit.net.find_fallback_nic', return_value='eth9')) - - def tearDown(self): - self.patches.close() - super(TestAzureBounce, self).tearDown() - - def _get_ds(self, ovfcontent=None): - if ovfcontent is not None: - populate_dir(os.path.join(self.paths.seed_dir, "azure"), - {'ovf-env.xml': ovfcontent}) - dsrc = dsaz.DataSourceAzure({}, distro=mock.Mock(), paths=self.paths) - return dsrc - - def _get_and_setup(self, dsrc): - ret = dsrc.get_data() - if ret: - dsrc.setup(True) - return ret - - def get_ovf_env_with_dscfg(self, hostname, cfg): - odata = { - 'HostName': hostname, - 'dscfg': { - 'text': b64e(yaml.dump(cfg)), - 'encoding': 'base64' - } - } - return construct_valid_ovf_env(data=odata) - - def test_disabled_bounce_does_not_change_hostname(self): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_disabled_bounce_does_not_perform_bounce( - self, perform_hostname_bounce): - cfg = {'hostname_bounce': {'policy': 'off'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_same_hostname_does_not_change_hostname(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_unchanged_hostname_does_not_perform_bounce( - self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'yes'}} - ds = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ds.get_data() - self.assertEqual(0, perform_hostname_bounce.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_force_performs_bounce_regardless(self, perform_hostname_bounce): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_bounce_skipped_on_ifupdown_absent(self): - host_name = 'unchanged-host-name' - self.get_hostname.return_value = host_name - cfg = {'hostname_bounce': {'policy': 'force'}} - dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)) - patch_path = MOCKPATH + 'subp.which' - with mock.patch(patch_path) as m_which: - m_which.return_value = None - ret = self._get_and_setup(dsrc) - self.assertEqual([mock.call('ifup')], m_which.call_args_list) - self.assertTrue(ret) - self.assertIn( - "Skipping network bounce: ifupdown utils aren't present.", - self.logs.getvalue()) - - def test_different_hostnames_sets_hostname(self): - expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 'default-host-name' - dsrc = self._get_ds( - 
self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(expected_hostname, - self.set_hostname.call_args_list[0][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_different_hostnames_performs_bounce( - self, perform_hostname_bounce): - expected_hostname = 'azure-expected-host-name' - self.get_hostname.return_value = 'default-host-name' - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg(expected_hostname, {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, perform_hostname_bounce.call_count) - - def test_different_hostnames_sets_hostname_back(self): - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_failure_in_bounce_still_resets_host_name( - self, perform_hostname_bounce): - perform_hostname_bounce.side_effect = Exception - initial_host_name = 'default-host-name' - self.get_hostname.return_value = initial_host_name - dsrc = self._get_ds( - self.get_ovf_env_with_dscfg('some-host-name', {})) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(initial_host_name, - self.set_hostname.call_args_list[-1][0][0]) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_environment_correct_for_bounce_command( - self, mock_get_boot_telemetry): - interface = 'int0' - hostname = 'my-new-host' - old_hostname = 'my-old-host' - self.get_hostname.return_value = old_hostname - cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} - data = self.get_ovf_env_with_dscfg(hostname, cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_env = self.subp.call_args[1]['env'] - self.assertEqual(interface, bounce_env['interface']) - self.assertEqual(hostname, bounce_env['hostname']) - self.assertEqual(old_hostname, bounce_env['old_hostname']) - - @mock.patch.object(dsaz, 'get_boot_telemetry') - def test_default_bounce_command_ifup_used_by_default( - self, mock_get_boot_telemetry): - cfg = {'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - dsrc = self._get_ds(data) - ret = self._get_and_setup(dsrc) - self.assertTrue(ret) - self.assertEqual(1, self.subp.call_count) - bounce_args = self.subp.call_args[1]['args'] - self.assertEqual( - dsaz.BOUNCE_COMMAND_IFUP, bounce_args) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_set_hostname_option_can_disable_bounce( - self, perform_hostname_bounce): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, perform_hostname_bounce.call_count) - - def test_set_hostname_option_can_disable_hostname_set(self): - cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} - data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data).get_data() - - self.assertEqual(0, self.set_hostname.call_count) - - @mock.patch(MOCKPATH + 'perform_hostname_bounce') - def test_set_hostname_failed_disable_bounce( - self, perform_hostname_bounce): - cfg = {'set_hostname': True, 'hostname_bounce': {'policy': 
'force'}}
-        self.get_hostname.return_value = "old-hostname"
-        self.set_hostname.side_effect = Exception
-        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
-        self._get_ds(data).get_data()
-
-        self.assertEqual(0, perform_hostname_bounce.call_count)
-
-
 class TestLoadAzureDsDir(CiTestCase):
     """Tests for load_azure_ds_dir."""
-- 
cgit v1.2.3 


From f4692c5d96323dc635fca26b742199d4c41f88d3 Mon Sep 17 00:00:00 2001
From: Gonéri Le Bouder
Date: Mon, 13 Dec 2021 12:31:39 -0500
Subject: find_devs_with_openbsd: ensure we return the last entry (#1149)

`sysctl -n hw.disknames` returns a trailing `\n`. We need to clean this
up.

In addition, the criteria matching system is a source of problems
because:

- we don't have a way to look up the label of the partition
- we've got situations where an ISO image can be exposed through a
  virtio block device.

So we just ignore the value of `criteria` entirely. We end up with a
slightly longer mount-retry loop, but this way we're sure we don't
miss a configuration disk.

Tested on KubeVirt with the help of Brady Pratt @jbpratt.
---
 cloudinit/util.py            | 8 +-------
 tests/unittests/test_util.py | 2 +-
 2 files changed, 2 insertions(+), 8 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index b9c584d1..27821de5 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1211,7 +1211,7 @@ def find_devs_with_openbsd(criteria=None, oformat='device',
                            tag=None, no_cache=False, path=None):
     out, _err = subp.subp(['sysctl', '-n', 'hw.disknames'], rcs=[0])
     devlist = []
-    for entry in out.split(','):
+    for entry in out.rstrip().split(','):
         if not entry.endswith(':'):
             # ffs partition with a serial, not a config-drive
             continue
@@ -1220,12 +1220,6 @@ def find_devs_with_openbsd(criteria=None, oformat='device',
             devlist.append(entry[:-1] + 'a')
         if not entry.startswith('cd'):
             devlist.append(entry[:-1] + 'i')
-    if criteria == "TYPE=iso9660":
-        devlist = [i for i in devlist if i.startswith('cd')]
-    elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]:
-        devlist = [i for i in devlist if not i.startswith('cd')]
-    elif criteria:
-        LOG.debug("Unexpected criteria: %s", criteria)
     return ['/dev/' + i for i in devlist]


diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index c551835f..61b9e303 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -2336,7 +2336,7 @@ class TestFindDevs:
     def test_find_devs_with_openbsd_with_criteria(self, m_subp):
         m_subp.return_value = ('cd0:,sd0:630d98d32b5d3759,sd1:,fd0:', '')
         devlist = util.find_devs_with_openbsd(criteria="TYPE=iso9660")
-        assert devlist == ['/dev/cd0a']
+        assert devlist == ['/dev/cd0a', '/dev/sd1a', '/dev/sd1i']

         # lp: #1841466
         devlist = util.find_devs_with_openbsd(criteria="LABEL_FATBOOT=A_LABEL")
-- 
cgit v1.2.3 


From 9a6e65a2a575055aadc1802004dbe3f343a54b89 Mon Sep 17 00:00:00 2001
From: James Falcon
Date: Mon, 13 Dec 2021 18:14:50 -0600
Subject: Schema processing changes (SC-676) (#1144)

* Use proper logging
* Add parsing for patternProperties
* Add label to annotate patternProperties
* Log warning if schema parsing fails during metaschema processing
* Some schema test fixes
---
 cloudinit/config/schema.py            | 97 +++++++++++++++++++++++------------
 tests/unittests/config/test_schema.py | 55 +++++++++++++++++---
 2 files changed, 113 insertions(+), 39 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index d32b7c01..d772b4f9 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -17,6 +17,7
@@ import sys import yaml error = partial(error, sys_exit=True) +LOG = logging.getLogger(__name__) _YAML_MAP = {True: 'true', False: 'false', None: 'null'} CLOUD_CONFIG_HEADER = b'#cloud-config' @@ -91,7 +92,16 @@ def get_jsonschema_validator(): # This allows #cloud-config to provide valid yaml "content: !!binary | ..." strict_metaschema = deepcopy(Draft4Validator.META_SCHEMA) - strict_metaschema['additionalProperties'] = False + strict_metaschema["additionalProperties"] = False + + # This additional label allows us to specify a different name + # than the property key when generating docs. + # This is especially useful when using a "patternProperties" regex, + # otherwise the property label in the generated docs will be a + # regular expression. + # http://json-schema.org/understanding-json-schema/reference/object.html#pattern-properties + strict_metaschema["properties"]["label"] = {"type": "string"} + if hasattr(Draft4Validator, 'TYPE_CHECKER'): # jsonschema 3.0+ type_checker = Draft4Validator.TYPE_CHECKER.redefine( 'string', is_schema_byte_string) @@ -140,7 +150,7 @@ def validate_cloudconfig_metaschema(validator, schema: dict, throw=True): ('.'.join([str(p) for p in err.path]), err.message), ) ) from err - logging.warning( + LOG.warning( "Meta-schema validation failed, attempting to validate config " "anyway: %s", err) @@ -168,7 +178,7 @@ def validate_cloudconfig_schema( validate_cloudconfig_metaschema( cloudinitValidator, schema, throw=False) except ImportError: - logging.debug("Ignoring schema validation. jsonschema is not present") + LOG.debug("Ignoring schema validation. jsonschema is not present") return validator = cloudinitValidator(schema, format_checker=FormatChecker()) @@ -180,8 +190,8 @@ def validate_cloudconfig_schema( if strict: raise SchemaValidationError(errors) else: - messages = ['{0}: {1}'.format(k, msg) for k, msg in errors] - logging.warning('Invalid config:\n%s', '\n'.join(messages)) + messages = ["{0}: {1}".format(k, msg) for k, msg in errors] + LOG.warning("Invalid config:\n%s", "\n".join(messages)) def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors): @@ -410,34 +420,53 @@ def _get_property_doc(schema: dict, prefix=" ") -> str: """Return restructured text describing the supported schema properties.""" new_prefix = prefix + ' ' properties = [] - for prop_key, prop_config in schema.get('properties', {}).items(): - # Define prop_name and description for SCHEMA_PROPERTY_TMPL - description = prop_config.get('description', '') - - # Define prop_name and description for SCHEMA_PROPERTY_TMPL - properties.append( - SCHEMA_PROPERTY_TMPL.format( - prefix=prefix, - prop_name=prop_key, - description=_parse_description(description, prefix), - prop_type=_get_property_type(prop_config), + property_keys = [ + schema.get("properties", {}), + schema.get("patternProperties", {}), + ] + + for props in property_keys: + for prop_key, prop_config in props.items(): + # Define prop_name and description for SCHEMA_PROPERTY_TMPL + description = prop_config.get("description", "") + + # Define prop_name and description for SCHEMA_PROPERTY_TMPL + label = prop_config.get("label", prop_key) + properties.append( + SCHEMA_PROPERTY_TMPL.format( + prefix=prefix, + prop_name=label, + description=_parse_description(description, prefix), + prop_type=_get_property_type(prop_config), + ) ) - ) - items = prop_config.get("items") - if items: - if isinstance(items, list): - for item in items: + items = prop_config.get("items") + if items: + if isinstance(items, list): + for item in 
items: + properties.append( + _get_property_doc(item, prefix=new_prefix) + ) + elif isinstance(items, dict) and ( + items.get("properties") or items.get("patternProperties") + ): properties.append( - _get_property_doc(item, prefix=new_prefix)) - elif isinstance(items, dict) and items.get('properties'): - properties.append(SCHEMA_LIST_ITEM_TMPL.format( - prefix=new_prefix, prop_name=prop_key)) - new_prefix += ' ' - properties.append(_get_property_doc(items, prefix=new_prefix)) - if 'properties' in prop_config: - properties.append( - _get_property_doc(prop_config, prefix=new_prefix)) - return '\n\n'.join(properties) + SCHEMA_LIST_ITEM_TMPL.format( + prefix=new_prefix, prop_name=label + ) + ) + new_prefix += " " + properties.append( + _get_property_doc(items, prefix=new_prefix) + ) + if ( + "properties" in prop_config + or "patternProperties" in prop_config + ): + properties.append( + _get_property_doc(prop_config, prefix=new_prefix) + ) + return "\n\n".join(properties) def _get_examples(meta: MetaSchema) -> str: @@ -494,7 +523,11 @@ def get_meta_doc(meta: MetaSchema, schema: dict) -> str: # cast away type annotation meta_copy = dict(deepcopy(meta)) - meta_copy["property_doc"] = _get_property_doc(schema) + try: + meta_copy["property_doc"] = _get_property_doc(schema) + except AttributeError: + LOG.warning("Unable to render property_doc due to invalid schema") + meta_copy["property_doc"] = "" meta_copy["examples"] = _get_examples(meta) meta_copy["distros"] = ", ".join(meta["distros"]) # Need an underbar of the same length as the name diff --git a/tests/unittests/config/test_schema.py b/tests/unittests/config/test_schema.py index ed7ab527..40803cae 100644 --- a/tests/unittests/config/test_schema.py +++ b/tests/unittests/config/test_schema.py @@ -55,7 +55,7 @@ def get_module_variable(var_name) -> dict: schemas = {} files = list( - Path(cloud_init_project_dir("../../cloudinit/config/")).glob("cc_*.py") + Path(cloud_init_project_dir("cloudinit/config/")).glob("cc_*.py") ) modules = [mod.stem for mod in files] @@ -215,12 +215,13 @@ class TestCloudConfigExamples: @pytest.mark.parametrize("schema_id, example", params) @skipUnlessJsonSchema() def test_validateconfig_schema_of_example(self, schema_id, example): - """ For a given example in a config module we test if it is valid + """For a given example in a config module we test if it is valid according to the unified schema of all config modules """ config_load = safe_load(example) validate_cloudconfig_schema( - config_load, self.schema, strict=True) + config_load, self.schema[schema_id], strict=True + ) class ValidateCloudConfigFileTest(CiTestCase): @@ -462,6 +463,44 @@ class GetSchemaDocTest(CiTestCase): get_meta_doc(invalid_meta, schema) self.assertIn(key, str(context_mgr.exception)) + def test_label_overrides_property_name(self): + """get_meta_doc overrides property name with label.""" + schema = { + "properties": { + "prop1": { + "type": "string", + "label": "label1", + }, + "prop_no_label": { + "type": "string", + }, + "prop_array": { + "label": 'array_label', + "type": "array", + "items": { + "type": "object", + "properties": { + "some_prop": {"type": "number"}, + }, + }, + }, + }, + "patternProperties": { + "^.*$": { + "type": "string", + "label": "label2", + } + } + } + meta_doc = get_meta_doc(self.meta, schema) + assert "**label1:** (string)" in meta_doc + assert "**label2:** (string" in meta_doc + assert "**prop_no_label:** (string)" in meta_doc + assert "Each item in **array_label** list" in meta_doc + + assert "prop1" not in meta_doc + 
assert ".*" not in meta_doc + class AnnotatedCloudconfigFileTest(CiTestCase): maxDiff = None @@ -626,9 +665,11 @@ def _get_meta_doc_examples(): examples_dir = Path(cloud_init_project_dir('doc/examples')) assert examples_dir.is_dir() - all_text_files = (f for f in examples_dir.glob('cloud-config*.txt') - if not f.name.startswith('cloud-config-archive')) - return all_text_files + return ( + str(f) + for f in examples_dir.glob("cloud-config*.txt") + if not f.name.startswith("cloud-config-archive") + ) class TestSchemaDocExamples: @@ -637,7 +678,7 @@ class TestSchemaDocExamples: @pytest.mark.parametrize("example_path", _get_meta_doc_examples()) @skipUnlessJsonSchema() def test_schema_doc_examples(self, example_path): - validate_cloudconfig_file(str(example_path), self.schema) + validate_cloudconfig_file(example_path, self.schema) class TestStrictMetaschema: -- cgit v1.2.3 From 2bcf4fa972fde686c2e3141c58e640640b44dd00 Mon Sep 17 00:00:00 2001 From: James Falcon Date: Tue, 14 Dec 2021 21:26:20 -0600 Subject: Include dpkg frontend lock in APT_LOCK_FILES (#1153) --- cloudinit/distros/debian.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index f3901470..b2af0866 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -43,10 +43,17 @@ NETWORK_FILE_HEADER = """\ NETWORK_CONF_FN = "/etc/network/interfaces.d/50-cloud-init" LOCALE_CONF_FN = "/etc/default/locale" +# The frontend lock needs to be acquired first followed by the order that +# apt uses. /var/lib/apt/lists is locked independently of that install chain, +# and only locked during update, so you can acquire it either order. +# Also update does not acquire the dpkg frontend lock. +# More context: +# https://github.com/canonical/cloud-init/pull/1034#issuecomment-986971376 APT_LOCK_FILES = [ + '/var/lib/dpkg/lock-frontend', '/var/lib/dpkg/lock', - '/var/lib/apt/lists/lock', '/var/cache/apt/archives/lock', + '/var/lib/apt/lists/lock', ] -- cgit v1.2.3 From bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf Mon Sep 17 00:00:00 2001 From: James Falcon Date: Wed, 15 Dec 2021 20:16:38 -0600 Subject: Adopt Black and isort (SC-700) (#1157) Applied Black and isort, fixed any linting issues, updated tox.ini and CI. 
--- .travis.yml | 4 + CONTRIBUTING.rst | 5 + cloudinit/analyze/__main__.py | 269 +- cloudinit/analyze/dump.py | 71 +- cloudinit/analyze/show.py | 192 +- cloudinit/apport.py | 153 +- cloudinit/atomic_helper.py | 25 +- cloudinit/cloud.py | 14 +- cloudinit/cmd/clean.py | 59 +- cloudinit/cmd/cloud_id.py | 68 +- cloudinit/cmd/devel/__init__.py | 3 +- cloudinit/cmd/devel/hotplug_hook.py | 138 +- cloudinit/cmd/devel/logs.py | 120 +- cloudinit/cmd/devel/make_mime.py | 76 +- cloudinit/cmd/devel/net_convert.py | 145 +- cloudinit/cmd/devel/parser.py | 48 +- cloudinit/cmd/devel/render.py | 54 +- cloudinit/cmd/main.py | 595 ++- cloudinit/cmd/query.py | 170 +- cloudinit/cmd/status.py | 101 +- cloudinit/config/__init__.py | 20 +- cloudinit/config/cc_apk_configure.py | 195 +- cloudinit/config/cc_apt_configure.py | 618 +-- cloudinit/config/cc_apt_pipelining.py | 13 +- cloudinit/config/cc_bootcmd.py | 65 +- cloudinit/config/cc_byobu.py | 27 +- cloudinit/config/cc_ca_certs.py | 84 +- cloudinit/config/cc_chef.py | 659 +-- cloudinit/config/cc_debug.py | 21 +- cloudinit/config/cc_disable_ec2_metadata.py | 25 +- cloudinit/config/cc_disk_setup.py | 334 +- cloudinit/config/cc_emit_upstart.py | 24 +- cloudinit/config/cc_fan.py | 34 +- cloudinit/config/cc_final_message.py | 24 +- cloudinit/config/cc_foo.py | 1 + cloudinit/config/cc_growpart.py | 134 +- cloudinit/config/cc_grub_dpkg.py | 54 +- cloudinit/config/cc_install_hotplug.py | 48 +- cloudinit/config/cc_keys_to_console.py | 36 +- cloudinit/config/cc_landscape.py | 24 +- cloudinit/config/cc_locale.py | 51 +- cloudinit/config/cc_lxd.py | 186 +- cloudinit/config/cc_mcollective.py | 50 +- cloudinit/config/cc_migrator.py | 25 +- cloudinit/config/cc_mounts.py | 172 +- cloudinit/config/cc_ntp.py | 546 ++- .../config/cc_package_update_upgrade_install.py | 30 +- cloudinit/config/cc_phone_home.py | 98 +- cloudinit/config/cc_power_state_change.py | 58 +- cloudinit/config/cc_puppet.py | 194 +- cloudinit/config/cc_refresh_rmc_and_interface.py | 51 +- cloudinit/config/cc_reset_rmc.py | 43 +- cloudinit/config/cc_resizefs.py | 183 +- cloudinit/config/cc_resolv_conf.py | 41 +- cloudinit/config/cc_rh_subscription.py | 240 +- cloudinit/config/cc_rightscale_userdata.py | 31 +- cloudinit/config/cc_rsyslog.py | 86 +- cloudinit/config/cc_runcmd.py | 70 +- cloudinit/config/cc_salt_minion.py | 69 +- cloudinit/config/cc_scripts_per_boot.py | 14 +- cloudinit/config/cc_scripts_per_instance.py | 14 +- cloudinit/config/cc_scripts_per_once.py | 14 +- cloudinit/config/cc_scripts_user.py | 12 +- cloudinit/config/cc_scripts_vendor.py | 22 +- cloudinit/config/cc_seed_random.py | 41 +- cloudinit/config/cc_set_hostname.py | 30 +- cloudinit/config/cc_set_passwords.py | 65 +- cloudinit/config/cc_snap.py | 168 +- cloudinit/config/cc_spacewalk.py | 67 +- cloudinit/config/cc_ssh.py | 106 +- cloudinit/config/cc_ssh_authkey_fingerprints.py | 73 +- cloudinit/config/cc_ssh_import_id.py | 23 +- cloudinit/config/cc_timezone.py | 2 +- cloudinit/config/cc_ubuntu_advantage.py | 154 +- cloudinit/config/cc_ubuntu_drivers.py | 133 +- cloudinit/config/cc_update_etc_hosts.py | 42 +- cloudinit/config/cc_update_hostname.py | 25 +- cloudinit/config/cc_users_groups.py | 39 +- cloudinit/config/cc_write_files.py | 242 +- cloudinit/config/cc_write_files_deferred.py | 22 +- cloudinit/config/cc_yum_add_repo.py | 65 +- cloudinit/config/cc_zypper_add_repo.py | 159 +- cloudinit/config/schema.py | 239 +- cloudinit/cs_utils.py | 20 +- cloudinit/dhclient_hook.py | 21 +- cloudinit/distros/__init__.py | 420 +- 
cloudinit/distros/almalinux.py | 1 + cloudinit/distros/alpine.py | 45 +- cloudinit/distros/amazon.py | 1 - cloudinit/distros/arch.py | 147 +- cloudinit/distros/bsd.py | 66 +- cloudinit/distros/bsd_utils.py | 18 +- cloudinit/distros/centos.py | 1 + cloudinit/distros/cloudlinux.py | 1 + cloudinit/distros/debian.py | 168 +- cloudinit/distros/dragonflybsd.py | 2 +- cloudinit/distros/eurolinux.py | 1 + cloudinit/distros/fedora.py | 1 + cloudinit/distros/freebsd.py | 93 +- cloudinit/distros/gentoo.py | 140 +- cloudinit/distros/miraclelinux.py | 2 + cloudinit/distros/net_util.py | 68 +- cloudinit/distros/netbsd.py | 85 +- cloudinit/distros/networking.py | 13 +- cloudinit/distros/openEuler.py | 1 + cloudinit/distros/openbsd.py | 20 +- cloudinit/distros/opensuse.py | 119 +- cloudinit/distros/parsers/__init__.py | 3 +- cloudinit/distros/parsers/hostname.py | 24 +- cloudinit/distros/parsers/hosts.py | 24 +- cloudinit/distros/parsers/networkmanager_conf.py | 6 +- cloudinit/distros/parsers/resolv_conf.py | 73 +- cloudinit/distros/parsers/sys_conf.py | 38 +- cloudinit/distros/photon.py | 86 +- cloudinit/distros/rhel.py | 76 +- cloudinit/distros/rhel_util.py | 4 +- cloudinit/distros/rocky.py | 1 + cloudinit/distros/sles.py | 1 + cloudinit/distros/ubuntu.py | 33 +- cloudinit/distros/ug_util.py | 106 +- cloudinit/distros/virtuozzo.py | 1 + cloudinit/dmi.py | 68 +- cloudinit/ec2_utils.py | 165 +- cloudinit/event.py | 8 +- cloudinit/filters/launch_index.py | 12 +- cloudinit/gpg.py | 48 +- cloudinit/handlers/__init__.py | 152 +- cloudinit/handlers/boot_hook.py | 21 +- cloudinit/handlers/cloud_config.py | 29 +- cloudinit/handlers/jinja_template.py | 87 +- cloudinit/handlers/shell_script.py | 15 +- cloudinit/handlers/upstart_job.py | 22 +- cloudinit/helpers.py | 111 +- cloudinit/importer.py | 3 +- cloudinit/log.py | 21 +- cloudinit/mergers/__init__.py | 43 +- cloudinit/mergers/m_dict.py | 34 +- cloudinit/mergers/m_list.py | 37 +- cloudinit/mergers/m_str.py | 5 +- cloudinit/net/__init__.py | 579 ++- cloudinit/net/activators.py | 87 +- cloudinit/net/bsd.py | 112 +- cloudinit/net/cmdline.py | 97 +- cloudinit/net/dhcp.py | 194 +- cloudinit/net/eni.py | 454 +- cloudinit/net/freebsd.py | 44 +- cloudinit/net/netbsd.py | 27 +- cloudinit/net/netplan.py | 313 +- cloudinit/net/network_state.py | 734 +-- cloudinit/net/networkd.py | 208 +- cloudinit/net/openbsd.py | 33 +- cloudinit/net/renderer.py | 31 +- cloudinit/net/renderers.py | 40 +- cloudinit/net/sysconfig.py | 886 ++-- cloudinit/net/udev.py | 23 +- cloudinit/netinfo.py | 403 +- cloudinit/patcher.py | 9 +- cloudinit/registry.py | 4 +- cloudinit/reporting/__init__.py | 9 +- cloudinit/reporting/events.py | 97 +- cloudinit/reporting/handlers.py | 128 +- cloudinit/safeyaml.py | 25 +- cloudinit/serial.py | 25 +- cloudinit/settings.py | 82 +- cloudinit/signal_handler.py | 12 +- cloudinit/simpletable.py | 26 +- cloudinit/sources/DataSourceAliYun.py | 18 +- cloudinit/sources/DataSourceAltCloud.py | 113 +- cloudinit/sources/DataSourceAzure.py | 1350 +++--- cloudinit/sources/DataSourceBigstep.py | 9 +- cloudinit/sources/DataSourceCloudSigma.py | 39 +- cloudinit/sources/DataSourceCloudStack.py | 135 +- cloudinit/sources/DataSourceConfigDrive.py | 117 +- cloudinit/sources/DataSourceDigitalOcean.py | 65 +- cloudinit/sources/DataSourceEc2.py | 461 +- cloudinit/sources/DataSourceExoscale.py | 171 +- cloudinit/sources/DataSourceGCE.py | 221 +- cloudinit/sources/DataSourceHetzner.py | 74 +- cloudinit/sources/DataSourceIBMCloud.py | 128 +- cloudinit/sources/DataSourceLXD.py | 61 +- 
cloudinit/sources/DataSourceMAAS.py | 180 +- cloudinit/sources/DataSourceNoCloud.py | 154 +- cloudinit/sources/DataSourceNone.py | 15 +- cloudinit/sources/DataSourceOVF.py | 311 +- cloudinit/sources/DataSourceOpenNebula.py | 190 +- cloudinit/sources/DataSourceOpenStack.py | 129 +- cloudinit/sources/DataSourceOracle.py | 125 +- cloudinit/sources/DataSourceRbxCloud.py | 194 +- cloudinit/sources/DataSourceScaleway.py | 131 +- cloudinit/sources/DataSourceSmartOS.py | 555 ++- cloudinit/sources/DataSourceUpCloud.py | 7 +- cloudinit/sources/DataSourceVMware.py | 13 +- cloudinit/sources/DataSourceVultr.py | 86 +- cloudinit/sources/__init__.py | 385 +- cloudinit/sources/helpers/azure.py | 693 +-- cloudinit/sources/helpers/digitalocean.py | 195 +- cloudinit/sources/helpers/hetzner.py | 15 +- cloudinit/sources/helpers/netlink.py | 187 +- cloudinit/sources/helpers/openstack.py | 438 +- cloudinit/sources/helpers/upcloud.py | 12 +- cloudinit/sources/helpers/vmware/imc/boot_proto.py | 5 +- cloudinit/sources/helpers/vmware/imc/config.py | 59 +- .../helpers/vmware/imc/config_custom_script.py | 45 +- .../sources/helpers/vmware/imc/config_file.py | 7 +- .../sources/helpers/vmware/imc/config_namespace.py | 1 + cloudinit/sources/helpers/vmware/imc/config_nic.py | 84 +- .../sources/helpers/vmware/imc/config_passwd.py | 38 +- .../sources/helpers/vmware/imc/config_source.py | 1 + .../sources/helpers/vmware/imc/guestcust_error.py | 1 + .../sources/helpers/vmware/imc/guestcust_event.py | 1 + .../sources/helpers/vmware/imc/guestcust_state.py | 1 + .../sources/helpers/vmware/imc/guestcust_util.py | 46 +- cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 11 +- cloudinit/sources/helpers/vmware/imc/nic.py | 33 +- cloudinit/sources/helpers/vmware/imc/nic_base.py | 29 +- cloudinit/sources/helpers/vultr.py | 172 +- cloudinit/ssh_util.py | 172 +- cloudinit/stages.py | 649 +-- cloudinit/subp.py | 165 +- cloudinit/temp_utils.py | 20 +- cloudinit/templater.py | 96 +- cloudinit/type_utils.py | 4 +- cloudinit/url_helper.py | 273 +- cloudinit/user_data.py | 121 +- cloudinit/util.py | 873 ++-- cloudinit/version.py | 9 +- cloudinit/warnings.py | 21 +- conftest.py | 5 +- doc/rtd/conf.py | 30 +- pyproject.toml | 8 + setup.py | 263 +- tests/integration_tests/__init__.py | 8 +- tests/integration_tests/bugs/test_gh570.py | 13 +- tests/integration_tests/bugs/test_gh626.py | 25 +- tests/integration_tests/bugs/test_gh632.py | 20 +- tests/integration_tests/bugs/test_gh668.py | 15 +- tests/integration_tests/bugs/test_gh671.py | 35 +- tests/integration_tests/bugs/test_gh868.py | 3 +- tests/integration_tests/bugs/test_lp1813396.py | 3 +- tests/integration_tests/bugs/test_lp1835584.py | 19 +- tests/integration_tests/bugs/test_lp1886531.py | 2 - tests/integration_tests/bugs/test_lp1897099.py | 13 +- tests/integration_tests/bugs/test_lp1898997.py | 14 +- tests/integration_tests/bugs/test_lp1900837.py | 2 +- tests/integration_tests/bugs/test_lp1901011.py | 49 +- tests/integration_tests/bugs/test_lp1910835.py | 1 - tests/integration_tests/bugs/test_lp1912844.py | 4 +- tests/integration_tests/clouds.py | 163 +- tests/integration_tests/conftest.py | 130 +- .../datasources/test_lxd_discovery.py | 43 +- .../datasources/test_network_dependency.py | 17 +- tests/integration_tests/instances.py | 83 +- tests/integration_tests/integration_settings.py | 13 +- tests/integration_tests/modules/test_apt.py | 88 +- tests/integration_tests/modules/test_ca_certs.py | 1 - tests/integration_tests/modules/test_cli.py | 9 +- 
tests/integration_tests/modules/test_combined.py | 155 +- .../modules/test_command_output.py | 5 +- tests/integration_tests/modules/test_disk_setup.py | 76 +- tests/integration_tests/modules/test_growpart.py | 38 +- tests/integration_tests/modules/test_hotplug.py | 55 +- .../modules/test_jinja_templating.py | 11 +- .../modules/test_keys_to_console.py | 9 +- tests/integration_tests/modules/test_lxd_bridge.py | 2 - .../integration_tests/modules/test_ntp_servers.py | 30 +- .../modules/test_package_update_upgrade_install.py | 18 +- .../integration_tests/modules/test_persistence.py | 26 +- .../modules/test_power_state_change.py | 48 +- tests/integration_tests/modules/test_puppet.py | 6 +- .../integration_tests/modules/test_set_hostname.py | 10 +- .../integration_tests/modules/test_set_password.py | 15 +- .../modules/test_ssh_auth_key_fingerprints.py | 13 +- .../integration_tests/modules/test_ssh_generate.py | 16 +- .../modules/test_ssh_keys_provided.py | 58 +- .../integration_tests/modules/test_ssh_keysfile.py | 159 +- .../integration_tests/modules/test_user_events.py | 50 +- .../integration_tests/modules/test_users_groups.py | 21 +- .../modules/test_version_change.py | 45 +- .../integration_tests/modules/test_write_files.py | 32 +- tests/integration_tests/test_upgrade.py | 120 +- tests/integration_tests/util.py | 39 +- tests/unittests/__init__.py | 1 + tests/unittests/analyze/test_boot.py | 135 +- tests/unittests/analyze/test_dump.py | 213 +- tests/unittests/cmd/devel/test_hotplug_hook.py | 162 +- tests/unittests/cmd/devel/test_logs.py | 232 +- tests/unittests/cmd/devel/test_render.py | 152 +- tests/unittests/cmd/test_clean.py | 179 +- tests/unittests/cmd/test_cloud_id.py | 99 +- tests/unittests/cmd/test_main.py | 223 +- tests/unittests/cmd/test_query.py | 403 +- tests/unittests/cmd/test_status.py | 561 ++- tests/unittests/config/test_apt_conf_v1.py | 68 +- .../config/test_apt_configure_sources_list_v1.py | 131 +- .../config/test_apt_configure_sources_list_v3.py | 158 +- tests/unittests/config/test_apt_key.py | 117 +- tests/unittests/config/test_apt_source_v1.py | 765 ++-- tests/unittests/config/test_apt_source_v3.py | 1220 +++-- tests/unittests/config/test_cc_apk_configure.py | 148 +- tests/unittests/config/test_cc_apt_pipelining.py | 12 +- tests/unittests/config/test_cc_bootcmd.py | 100 +- tests/unittests/config/test_cc_ca_certs.py | 220 +- tests/unittests/config/test_cc_chef.py | 202 +- tests/unittests/config/test_cc_debug.py | 39 +- .../config/test_cc_disable_ec2_metadata.py | 44 +- tests/unittests/config/test_cc_disk_setup.py | 270 +- tests/unittests/config/test_cc_growpart.py | 232 +- tests/unittests/config/test_cc_grub_dpkg.py | 121 +- tests/unittests/config/test_cc_install_hotplug.py | 58 +- tests/unittests/config/test_cc_keys_to_console.py | 18 +- tests/unittests/config/test_cc_landscape.py | 178 +- tests/unittests/config/test_cc_locale.py | 99 +- tests/unittests/config/test_cc_lxd.py | 250 +- tests/unittests/config/test_cc_mcollective.py | 104 +- tests/unittests/config/test_cc_mounts.py | 449 +- tests/unittests/config/test_cc_ntp.py | 682 +-- .../unittests/config/test_cc_power_state_change.py | 74 +- tests/unittests/config/test_cc_puppet.py | 432 +- .../config/test_cc_refresh_rmc_and_interface.py | 162 +- tests/unittests/config/test_cc_resizefs.py | 436 +- tests/unittests/config/test_cc_resolv_conf.py | 76 +- tests/unittests/config/test_cc_rh_subscription.py | 366 +- tests/unittests/config/test_cc_rsyslog.py | 112 +- tests/unittests/config/test_cc_runcmd.py | 74 +- 
tests/unittests/config/test_cc_seed_random.py | 158 +- tests/unittests/config/test_cc_set_hostname.py | 185 +- tests/unittests/config/test_cc_set_passwords.py | 111 +- tests/unittests/config/test_cc_snap.py | 445 +- tests/unittests/config/test_cc_spacewalk.py | 36 +- tests/unittests/config/test_cc_ssh.py | 356 +- tests/unittests/config/test_cc_timezone.py | 31 +- tests/unittests/config/test_cc_ubuntu_advantage.py | 311 +- tests/unittests/config/test_cc_ubuntu_drivers.py | 213 +- tests/unittests/config/test_cc_update_etc_hosts.py | 63 +- tests/unittests/config/test_cc_users_groups.py | 264 +- tests/unittests/config/test_cc_write_files.py | 148 +- .../config/test_cc_write_files_deferred.py | 62 +- tests/unittests/config/test_cc_yum_add_repo.py | 105 +- tests/unittests/config/test_cc_zypper_add_repo.py | 166 +- tests/unittests/config/test_schema.py | 301 +- tests/unittests/distros/__init__.py | 10 +- tests/unittests/distros/test_arch.py | 50 +- tests/unittests/distros/test_bsd_utils.py | 49 +- tests/unittests/distros/test_create_users.py | 252 +- tests/unittests/distros/test_debian.py | 155 +- tests/unittests/distros/test_freebsd.py | 28 +- tests/unittests/distros/test_generic.py | 300 +- tests/unittests/distros/test_gentoo.py | 11 +- tests/unittests/distros/test_hostname.py | 16 +- tests/unittests/distros/test_hosts.py | 36 +- tests/unittests/distros/test_init.py | 273 +- tests/unittests/distros/test_manage_service.py | 33 +- tests/unittests/distros/test_netbsd.py | 11 +- tests/unittests/distros/test_netconfig.py | 605 ++- tests/unittests/distros/test_networking.py | 30 +- tests/unittests/distros/test_opensuse.py | 3 +- tests/unittests/distros/test_photon.py | 42 +- tests/unittests/distros/test_resolv.py | 55 +- tests/unittests/distros/test_sles.py | 3 +- tests/unittests/distros/test_sysconfig.py | 62 +- .../unittests/distros/test_user_data_normalize.py | 383 +- tests/unittests/filters/test_launch_index.py | 23 +- tests/unittests/helpers.py | 191 +- tests/unittests/net/test_dhcp.py | 678 +-- tests/unittests/net/test_init.py | 1368 +++--- tests/unittests/net/test_network_state.py | 82 +- tests/unittests/net/test_networkd.py | 2 +- tests/unittests/runs/test_merge_run.py | 49 +- tests/unittests/runs/test_simple_run.py | 132 +- tests/unittests/sources/helpers/test_netlink.py | 357 +- tests/unittests/sources/helpers/test_openstack.py | 51 +- tests/unittests/sources/test_aliyun.py | 217 +- tests/unittests/sources/test_altcloud.py | 311 +- tests/unittests/sources/test_azure.py | 3174 +++++++------ tests/unittests/sources/test_azure_helper.py | 1138 +++-- tests/unittests/sources/test_cloudsigma.py | 72 +- tests/unittests/sources/test_cloudstack.py | 121 +- tests/unittests/sources/test_common.py | 86 +- tests/unittests/sources/test_configdrive.py | 1100 +++-- tests/unittests/sources/test_digitalocean.py | 283 +- tests/unittests/sources/test_ec2.py | 851 ++-- tests/unittests/sources/test_exoscale.py | 248 +- tests/unittests/sources/test_gce.py | 304 +- tests/unittests/sources/test_hetzner.py | 85 +- tests/unittests/sources/test_ibmcloud.py | 299 +- tests/unittests/sources/test_init.py | 879 ++-- tests/unittests/sources/test_lxd.py | 134 +- tests/unittests/sources/test_maas.py | 147 +- tests/unittests/sources/test_nocloud.py | 320 +- tests/unittests/sources/test_opennebula.py | 888 ++-- tests/unittests/sources/test_openstack.py | 652 +-- tests/unittests/sources/test_oracle.py | 412 +- tests/unittests/sources/test_ovf.py | 1053 +++-- tests/unittests/sources/test_rbx.py | 215 +- 
tests/unittests/sources/test_scaleway.py | 481 +- tests/unittests/sources/test_smartos.py | 956 ++-- tests/unittests/sources/test_upcloud.py | 161 +- tests/unittests/sources/test_vmware.py | 12 +- tests/unittests/sources/test_vultr.py | 375 +- .../unittests/sources/vmware/test_custom_script.py | 61 +- .../sources/vmware/test_guestcust_util.py | 79 +- .../sources/vmware/test_vmware_config_file.py | 430 +- tests/unittests/test__init__.py | 193 +- tests/unittests/test_atomic_helper.py | 4 +- tests/unittests/test_builtin_handlers.py | 405 +- tests/unittests/test_cli.py | 214 +- tests/unittests/test_conftest.py | 10 +- tests/unittests/test_cs_util.py | 39 +- tests/unittests/test_data.py | 526 ++- tests/unittests/test_dhclient_hook.py | 89 +- tests/unittests/test_dmi.py | 90 +- tests/unittests/test_ds_identify.py | 1609 ++++--- tests/unittests/test_ec2_util.py | 376 +- tests/unittests/test_event.py | 16 +- tests/unittests/test_features.py | 36 +- tests/unittests/test_gpg.py | 103 +- tests/unittests/test_helpers.py | 11 +- tests/unittests/test_log.py | 12 +- tests/unittests/test_merging.py | 123 +- tests/unittests/test_net.py | 4833 ++++++++++++-------- tests/unittests/test_net_activators.py | 154 +- tests/unittests/test_net_freebsd.py | 45 +- tests/unittests/test_netinfo.py | 193 +- tests/unittests/test_pathprefix2dict.py | 28 +- tests/unittests/test_registry.py | 21 +- tests/unittests/test_render_cloudcfg.py | 71 +- tests/unittests/test_reporting.py | 379 +- tests/unittests/test_reporting_hyperv.py | 193 +- tests/unittests/test_simpletable.py | 47 +- tests/unittests/test_sshutil.py | 817 ++-- tests/unittests/test_stages.py | 444 +- tests/unittests/test_subp.py | 289 +- tests/unittests/test_temp_utils.py | 118 +- tests/unittests/test_templating.py | 103 +- tests/unittests/test_url_helper.py | 134 +- tests/unittests/test_util.py | 934 ++-- tests/unittests/test_version.py | 11 +- tests/unittests/util.py | 14 +- tools/mock-meta.py | 301 +- tools/validate-yaml.py | 4 +- tox.ini | 28 +- 441 files changed, 43425 insertions(+), 31496 deletions(-) create mode 100644 pyproject.toml (limited to 'cloudinit') diff --git a/.travis.yml b/.travis.yml index 9470cc31..c458db48 100644 --- a/.travis.yml +++ b/.travis.yml @@ -133,6 +133,10 @@ matrix: env: TOXENV=flake8 - python: 3.6 env: TOXENV=pylint + - python: 3.6 + env: TOXENV=black + - python: 3.6 + env: TOXENV=isort - python: 3.7 env: TOXENV=doc # Test all supported Python versions (but at the end, so we schedule diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 06b31497..aa09c61e 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -19,6 +19,7 @@ Before any pull request can be accepted, you must do the following: `tools/.github-cla-signers`_ * Add or update any `unit tests`_ accordingly * Add or update any `integration tests`_ (if applicable) +* Format code (using black and isort) with `tox -e format` * Ensure unit tests and linting pass using `tox`_ * Submit a PR against the `main` branch of the `cloud-init` repository @@ -133,6 +134,10 @@ Do these things for each feature or bug git commit +* Apply black and isort formatting rules with `tox`_:: + + tox -e format + * Run unit tests and lint/formatting checks with `tox`_:: tox diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 99e5c203..36a5be78 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -5,62 +5,111 @@ import argparse import re import sys +from datetime import datetime from cloudinit.util import json_dumps -from datetime import 
datetime -from . import dump -from . import show + +from . import dump, show def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-analyze', - description='Devel tool: Analyze cloud-init logs and data') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-analyze", + description="Devel tool: Analyze cloud-init logs and data", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True parser_blame = subparsers.add_parser( - 'blame', help='Print list of executed stages ordered by time to init') + "blame", help="Print list of executed stages ordered by time to init" + ) parser_blame.add_argument( - '-i', '--infile', action='store', dest='infile', - default='/var/log/cloud-init.log', - help='specify where to read input.') + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) parser_blame.add_argument( - '-o', '--outfile', action='store', dest='outfile', default='-', - help='specify where to write output. ') - parser_blame.set_defaults(action=('blame', analyze_blame)) + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. ", + ) + parser_blame.set_defaults(action=("blame", analyze_blame)) parser_show = subparsers.add_parser( - 'show', help='Print list of in-order events during execution') - parser_show.add_argument('-f', '--format', action='store', - dest='print_format', default='%I%D @%Es +%ds', - help='specify formatting of output.') - parser_show.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input.') - parser_show.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_show.set_defaults(action=('show', analyze_show)) + "show", help="Print list of in-order events during execution" + ) + parser_show.add_argument( + "-f", + "--format", + action="store", + dest="print_format", + default="%I%D @%Es +%ds", + help="specify formatting of output.", + ) + parser_show.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input.", + ) + parser_show.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_show.set_defaults(action=("show", analyze_show)) parser_dump = subparsers.add_parser( - 'dump', help='Dump cloud-init events in JSON format') - parser_dump.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. ') - parser_dump.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output. ') - parser_dump.set_defaults(action=('dump', analyze_dump)) + "dump", help="Dump cloud-init events in JSON format" + ) + parser_dump.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_dump.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output. 
", + ) + parser_dump.set_defaults(action=("dump", analyze_dump)) parser_boot = subparsers.add_parser( - 'boot', help='Print list of boot times for kernel and cloud-init') - parser_boot.add_argument('-i', '--infile', action='store', - dest='infile', default='/var/log/cloud-init.log', - help='specify where to read input. ') - parser_boot.add_argument('-o', '--outfile', action='store', - dest='outfile', default='-', - help='specify where to write output.') - parser_boot.set_defaults(action=('boot', analyze_boot)) + "boot", help="Print list of boot times for kernel and cloud-init" + ) + parser_boot.add_argument( + "-i", + "--infile", + action="store", + dest="infile", + default="/var/log/cloud-init.log", + help="specify where to read input. ", + ) + parser_boot.add_argument( + "-o", + "--outfile", + action="store", + dest="outfile", + default="-", + help="specify where to write output.", + ) + parser_boot.set_defaults(action=("boot", analyze_boot)) return parser @@ -78,61 +127,68 @@ def analyze_boot(name, args): """ infh, outfh = configure_io(args) kernel_info = show.dist_check_timestamp() - status_code, kernel_start, kernel_end, ci_sysd_start = \ - kernel_info + status_code, kernel_start, kernel_end, ci_sysd_start = kernel_info kernel_start_timestamp = datetime.utcfromtimestamp(kernel_start) kernel_end_timestamp = datetime.utcfromtimestamp(kernel_end) ci_sysd_start_timestamp = datetime.utcfromtimestamp(ci_sysd_start) try: - last_init_local = \ - [e for e in _get_events(infh) if e['name'] == 'init-local' and - 'starting search' in e['description']][-1] - ci_start = datetime.utcfromtimestamp(last_init_local['timestamp']) + last_init_local = [ + e + for e in _get_events(infh) + if e["name"] == "init-local" + and "starting search" in e["description"] + ][-1] + ci_start = datetime.utcfromtimestamp(last_init_local["timestamp"]) except IndexError: - ci_start = 'Could not find init-local log-line in cloud-init.log' + ci_start = "Could not find init-local log-line in cloud-init.log" status_code = show.FAIL_CODE - FAILURE_MSG = 'Your Linux distro or container does not support this ' \ - 'functionality.\n' \ - 'You must be running a Kernel Telemetry supported ' \ - 'distro.\nPlease check ' \ - 'https://cloudinit.readthedocs.io/en/latest' \ - '/topics/analyze.html for more ' \ - 'information on supported distros.\n' - - SUCCESS_MSG = '-- Most Recent Boot Record --\n' \ - ' Kernel Started at: {k_s_t}\n' \ - ' Kernel ended boot at: {k_e_t}\n' \ - ' Kernel time to boot (seconds): {k_r}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Time between Kernel end boot and Cloud-init ' \ - 'activation (seconds): {bt_r}\n' \ - ' Cloud-init start: {ci_start}\n' - - CONTAINER_MSG = '-- Most Recent Container Boot Record --\n' \ - ' Container started at: {k_s_t}\n' \ - ' Cloud-init activated by systemd at: {ci_sysd_t}\n' \ - ' Cloud-init start: {ci_start}\n' \ - + FAILURE_MSG = ( + "Your Linux distro or container does not support this " + "functionality.\n" + "You must be running a Kernel Telemetry supported " + "distro.\nPlease check " + "https://cloudinit.readthedocs.io/en/latest" + "/topics/analyze.html for more " + "information on supported distros.\n" + ) + + SUCCESS_MSG = ( + "-- Most Recent Boot Record --\n" + " Kernel Started at: {k_s_t}\n" + " Kernel ended boot at: {k_e_t}\n" + " Kernel time to boot (seconds): {k_r}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Time between Kernel end boot and Cloud-init " + "activation (seconds): {bt_r}\n" + " Cloud-init start: {ci_start}\n" + 
) + + CONTAINER_MSG = ( + "-- Most Recent Container Boot Record --\n" + " Container started at: {k_s_t}\n" + " Cloud-init activated by systemd at: {ci_sysd_t}\n" + " Cloud-init start: {ci_start}\n" + ) status_map = { show.FAIL_CODE: FAILURE_MSG, show.CONTAINER_CODE: CONTAINER_MSG, - show.SUCCESS_CODE: SUCCESS_MSG + show.SUCCESS_CODE: SUCCESS_MSG, } kernel_runtime = kernel_end - kernel_start between_process_runtime = ci_sysd_start - kernel_end kwargs = { - 'k_s_t': kernel_start_timestamp, - 'k_e_t': kernel_end_timestamp, - 'k_r': kernel_runtime, - 'bt_r': between_process_runtime, - 'k_e': kernel_end, - 'k_s': kernel_start, - 'ci_sysd': ci_sysd_start, - 'ci_sysd_t': ci_sysd_start_timestamp, - 'ci_start': ci_start + "k_s_t": kernel_start_timestamp, + "k_e_t": kernel_end_timestamp, + "k_r": kernel_runtime, + "bt_r": between_process_runtime, + "k_e": kernel_end, + "k_s": kernel_start, + "ci_sysd": ci_sysd_start, + "ci_sysd_t": ci_sysd_start_timestamp, + "ci_start": ci_start, } outfh.write(status_map[status_code].format(**kwargs)) @@ -152,15 +208,16 @@ def analyze_blame(name, args): and sorting by record data ('delta') """ (infh, outfh) = configure_io(args) - blame_format = ' %ds (%n)' - r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE) - for idx, record in enumerate(show.show_events(_get_events(infh), - blame_format)): + blame_format = " %ds (%n)" + r = re.compile(r"(^\s+\d+\.\d+)", re.MULTILINE) + for idx, record in enumerate( + show.show_events(_get_events(infh), blame_format) + ): srecs = sorted(filter(r.match, record), reverse=True) - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('\n'.join(srecs) + '\n') - outfh.write('\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write("\n".join(srecs) + "\n") + outfh.write("\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_show(name, args): @@ -184,21 +241,25 @@ def analyze_show(name, args): Finished stage: (modules-final) 0.NNN seconds """ (infh, outfh) = configure_io(args) - for idx, record in enumerate(show.show_events(_get_events(infh), - args.print_format)): - outfh.write('-- Boot Record %02d --\n' % (idx + 1)) - outfh.write('The total time elapsed since completing an event is' - ' printed after the "@" character.\n') - outfh.write('The time the event takes is printed after the "+" ' - 'character.\n\n') - outfh.write('\n'.join(record) + '\n') - outfh.write('%d boot records analyzed\n' % (idx + 1)) + for idx, record in enumerate( + show.show_events(_get_events(infh), args.print_format) + ): + outfh.write("-- Boot Record %02d --\n" % (idx + 1)) + outfh.write( + "The total time elapsed since completing an event is" + ' printed after the "@" character.\n' + ) + outfh.write( + 'The time the event takes is printed after the "+" character.\n\n' + ) + outfh.write("\n".join(record) + "\n") + outfh.write("%d boot records analyzed\n" % (idx + 1)) def analyze_dump(name, args): """Dump cloud-init events in json format""" (infh, outfh) = configure_io(args) - outfh.write(json_dumps(_get_events(infh)) + '\n') + outfh.write(json_dumps(_get_events(infh)) + "\n") def _get_events(infile): @@ -211,28 +272,28 @@ def _get_events(infile): def configure_io(args): """Common parsing and setup of input/output files""" - if args.infile == '-': + if args.infile == "-": infh = sys.stdin else: try: - infh = open(args.infile, 'r') + infh = open(args.infile, "r") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.infile) + sys.stderr.write("Cannot open 
file %s\n" % args.infile) sys.exit(1) - if args.outfile == '-': + if args.outfile == "-": outfh = sys.stdout else: try: - outfh = open(args.outfile, 'w') + outfh = open(args.outfile, "w") except OSError: - sys.stderr.write('Cannot open file %s\n' % args.outfile) + sys.stderr.write("Cannot open file %s\n" % args.outfile) sys.exit(1) return (infh, outfh) -if __name__ == '__main__': +if __name__ == "__main__": parser = get_parser() args = parser.parse_args() (name, action_functor) = args.action diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index 62ad51fe..8e6e3c6a 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -1,21 +1,20 @@ # This file is part of cloud-init. See LICENSE file for license information. import calendar -from datetime import datetime import sys +from datetime import datetime -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util stage_to_description = { - 'finished': 'finished running cloud-init', - 'init-local': 'starting search for local datasources', - 'init-network': 'searching for network datasources', - 'init': 'searching for network datasources', - 'modules-config': 'running config modules', - 'modules-final': 'finalizing modules', - 'modules': 'running modules for', - 'single': 'running single module ', + "finished": "finished running cloud-init", + "init-local": "starting search for local datasources", + "init-network": "searching for network datasources", + "init": "searching for network datasources", + "modules-config": "running config modules", + "modules-final": "finalizing modules", + "modules": "running modules for", + "single": "running single module ", } # logger's asctime format @@ -34,11 +33,11 @@ def parse_timestamp(timestampstr): if timestampstr.split()[0] in months: # Aug 29 22:55:26 FMT = DEFAULT_FMT - if '.' in timestampstr: + if "." in timestampstr: FMT = CLOUD_INIT_JOURNALCTL_FMT - dt = datetime.strptime(timestampstr + " " + - str(datetime.now().year), - FMT) + dt = datetime.strptime( + timestampstr + " " + str(datetime.now().year), FMT + ) timestamp = dt.strftime("%s.%f") elif "," in timestampstr: # 2016-09-12 14:39:20,839 @@ -52,7 +51,7 @@ def parse_timestamp(timestampstr): def parse_timestamp_from_date(timestampstr): - out, _ = subp.subp(['date', '+%s.%3N', '-d', timestampstr]) + out, _ = subp.subp(["date", "+%s.%3N", "-d", timestampstr]) timestamp = out.strip() return float(timestamp) @@ -79,8 +78,8 @@ def parse_ci_logline(line): # Apr 30 19:39:11 cloud-init[2673]: handlers.py[DEBUG]: start: \ # init-local/check-cache: attempting to read from cache [check] - amazon_linux_2_sep = ' cloud-init[' - separators = [' - ', ' [CLOUDINIT] ', amazon_linux_2_sep] + amazon_linux_2_sep = " cloud-init[" + separators = [" - ", " [CLOUDINIT] ", amazon_linux_2_sep] found = False for sep in separators: if sep in line: @@ -99,7 +98,7 @@ def parse_ci_logline(line): if "," in timehost: timestampstr, extra = timehost.split(",") timestampstr += ",%s" % extra.split()[0] - if ' ' in extra: + if " " in extra: hostname = extra.split()[-1] else: hostname = timehost.split()[-1] @@ -111,11 +110,11 @@ def parse_ci_logline(line): eventstr = eventstr.split(maxsplit=1)[1] else: timestampstr = timehost.split(hostname)[0].strip() - if 'Cloud-init v.' in eventstr: - event_type = 'start' - if 'running' in eventstr: - stage_and_timestamp = eventstr.split('running')[1].lstrip() - event_name, _ = stage_and_timestamp.split(' at ') + if "Cloud-init v." 
in eventstr: + event_type = "start" + if "running" in eventstr: + stage_and_timestamp = eventstr.split("running")[1].lstrip() + event_name, _ = stage_and_timestamp.split(" at ") event_name = event_name.replace("'", "").replace(":", "-") if event_name == "init": event_name = "init-network" @@ -128,17 +127,17 @@ def parse_ci_logline(line): event_description = eventstr.split(event_name)[1].strip() event = { - 'name': event_name.rstrip(":"), - 'description': event_description, - 'timestamp': parse_timestamp(timestampstr), - 'origin': 'cloudinit', - 'event_type': event_type.rstrip(":"), + "name": event_name.rstrip(":"), + "description": event_description, + "timestamp": parse_timestamp(timestampstr), + "origin": "cloudinit", + "event_type": event_type.rstrip(":"), } - if event['event_type'] == "finish": + if event["event_type"] == "finish": result = event_description.split(":")[0] - desc = event_description.split(result)[1].lstrip(':').strip() - event['result'] = result - event['description'] = desc.strip() + desc = event_description.split(result)[1].lstrip(":").strip() + event["result"] = result + event["description"] = desc.strip() return event @@ -146,10 +145,10 @@ def parse_ci_logline(line): def dump_events(cisource=None, rawdata=None): events = [] event = None - CI_EVENT_MATCHES = ['start:', 'finish:', 'Cloud-init v.'] + CI_EVENT_MATCHES = ["start:", "finish:", "Cloud-init v."] if not any([cisource, rawdata]): - raise ValueError('Either cisource or rawdata parameters are required') + raise ValueError("Either cisource or rawdata parameters are required") if rawdata: data = rawdata.splitlines() @@ -162,7 +161,7 @@ def dump_events(cisource=None, rawdata=None): try: event = parse_ci_logline(line) except ValueError: - sys.stderr.write('Skipping invalid entry\n') + sys.stderr.write("Skipping invalid entry\n") if event: events.append(event) diff --git a/cloudinit/analyze/show.py b/cloudinit/analyze/show.py index 01a4d3e5..5fd9cdfd 100644 --- a/cloudinit/analyze/show.py +++ b/cloudinit/analyze/show.py @@ -8,11 +8,10 @@ import base64 import datetime import json import os -import time import sys +import time -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.distros import uses_systemd # Example events: @@ -35,24 +34,25 @@ from cloudinit.distros import uses_systemd # } format_key = { - '%d': 'delta', - '%D': 'description', - '%E': 'elapsed', - '%e': 'event_type', - '%I': 'indent', - '%l': 'level', - '%n': 'name', - '%o': 'origin', - '%r': 'result', - '%t': 'timestamp', - '%T': 'total_time', + "%d": "delta", + "%D": "description", + "%E": "elapsed", + "%e": "event_type", + "%I": "indent", + "%l": "level", + "%n": "name", + "%o": "origin", + "%r": "result", + "%t": "timestamp", + "%T": "total_time", } -formatting_help = " ".join(["{0}: {1}".format(k.replace('%', '%%'), v) - for k, v in format_key.items()]) -SUCCESS_CODE = 'successful' -FAIL_CODE = 'failure' -CONTAINER_CODE = 'container' +formatting_help = " ".join( + ["{0}: {1}".format(k.replace("%", "%%"), v) for k, v in format_key.items()] +) +SUCCESS_CODE = "successful" +FAIL_CODE = "failure" +CONTAINER_CODE = "container" TIMESTAMP_UNKNOWN = (FAIL_CODE, -1, -1, -1) @@ -60,7 +60,7 @@ def format_record(msg, event): for i, j in format_key.items(): if i in msg: # ensure consistent formatting of time values - if j in ['delta', 'elapsed', 'timestamp']: + if j in ["delta", "elapsed", "timestamp"]: msg = msg.replace(i, "{%s:08.5f}" % j) else: msg = msg.replace(i, "{%s}" % j) @@ -68,13 +68,13 @@ def 
format_record(msg, event): def dump_event_files(event): - content = dict((k, v) for k, v in event.items() if k not in ['content']) - files = content['files'] + content = dict((k, v) for k, v in event.items() if k not in ["content"]) + files = content["files"] saved = [] for f in files: - fname = f['path'] + fname = f["path"] fn_local = os.path.basename(fname) - fcontent = base64.b64decode(f['content']).decode('ascii') + fcontent = base64.b64decode(f["content"]).decode("ascii") util.write_file(fn_local, fcontent) saved.append(fn_local) @@ -83,13 +83,13 @@ def dump_event_files(event): def event_name(event): if event: - return event.get('name') + return event.get("name") return None def event_type(event): if event: - return event.get('event_type') + return event.get("event_type") return None @@ -100,7 +100,7 @@ def event_parent(event): def event_timestamp(event): - return float(event.get('timestamp')) + return float(event.get("timestamp")) def event_datetime(event): @@ -117,41 +117,44 @@ def event_duration(start, finish): def event_record(start_time, start, finish): record = finish.copy() - record.update({ - 'delta': event_duration(start, finish), - 'elapsed': delta_seconds(start_time, event_datetime(start)), - 'indent': '|' + ' ' * (event_name(start).count('/') - 1) + '`->', - }) + record.update( + { + "delta": event_duration(start, finish), + "elapsed": delta_seconds(start_time, event_datetime(start)), + "indent": "|" + " " * (event_name(start).count("/") - 1) + "`->", + } + ) return record def total_time_record(total_time): - return 'Total Time: %3.5f seconds\n' % total_time + return "Total Time: %3.5f seconds\n" % total_time class SystemctlReader(object): - ''' + """ Class for dealing with all systemctl subp calls in a consistent manner. - ''' + """ + def __init__(self, property, parameter=None): self.epoch = None - self.args = ['/bin/systemctl', 'show'] + self.args = ["/bin/systemctl", "show"] if parameter: self.args.append(parameter) - self.args.extend(['-p', property]) + self.args.extend(["-p", property]) # Don't want the init of our object to break. Instead of throwing # an exception, set an error code that gets checked when data is # requested from the object self.failure = self.subp() def subp(self): - ''' + """ Make a subp call based on set args and handle errors by setting failure code :return: whether the subp call failed or not - ''' + """ try: value, err = subp.subp(self.args, capture=True) if err: @@ -162,41 +165,41 @@ class SystemctlReader(object): return systemctl_fail def parse_epoch_as_float(self): - ''' + """ If subp call succeeded, return the timestamp from subp as a float. :return: timestamp as a float - ''' + """ # subp has 2 ways to fail: it either fails and throws an exception, # or returns an error code. Raise an exception here in order to make # sure both scenarios throw exceptions if self.failure: - raise RuntimeError('Subprocess call to systemctl has failed, ' - 'returning error code ({})' - .format(self.failure)) + raise RuntimeError( + "Subprocess call to systemctl has failed, " + "returning error code ({})".format(self.failure) + ) # Output from systemctl show has the format Property=Value. # For example, UserspaceMonotonic=1929304 - timestamp = self.epoch.split('=')[1] + timestamp = self.epoch.split("=")[1] # Timestamps reported by systemctl are in microseconds, converting return float(timestamp) / 1000000 def dist_check_timestamp(): - ''' + """ Determine which init system a particular linux distro is using. 
Each init system (systemd, upstart, etc) has a different way of providing timestamps. :return: timestamps of kernelboot, kernelendboot, and cloud-initstart or TIMESTAMP_UNKNOWN if the timestamps cannot be retrieved. - ''' + """ if uses_systemd(): return gather_timestamps_using_systemd() # Use dmesg to get timestamps if the distro does not have systemd - if util.is_FreeBSD() or 'gentoo' in \ - util.system_info()['system'].lower(): + if util.is_FreeBSD() or "gentoo" in util.system_info()["system"].lower(): return gather_timestamps_using_dmesg() # this distro doesn't fit anything that is supported by cloud-init. just @@ -205,20 +208,20 @@ def dist_check_timestamp(): def gather_timestamps_using_dmesg(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization using dmesg as opposed to systemctl :return: the two timestamps plus a dummy timestamp to keep consistency with gather_timestamps_using_systemd - ''' + """ try: - data, _ = subp.subp(['dmesg'], capture=True) + data, _ = subp.subp(["dmesg"], capture=True) split_entries = data[0].splitlines() for i in split_entries: - if i.decode('UTF-8').find('user') != -1: - splitup = i.decode('UTF-8').split() - stripped = splitup[1].strip(']') + if i.decode("UTF-8").find("user") != -1: + splitup = i.decode("UTF-8").split() + stripped = splitup[1].strip("]") # kernel timestamp from dmesg is equal to 0, # with the userspace timestamp relative to it. @@ -228,8 +231,7 @@ def gather_timestamps_using_dmesg(): # systemd wont start cloud-init in this case, # so we cannot get that timestamp - return SUCCESS_CODE, kernel_start, kernel_end, \ - kernel_end + return SUCCESS_CODE, kernel_start, kernel_end, kernel_end except Exception: pass @@ -237,18 +239,20 @@ def gather_timestamps_using_dmesg(): def gather_timestamps_using_systemd(): - ''' + """ Gather timestamps that corresponds to kernel begin initialization, kernel finish initialization. and cloud-init systemd unit activation :return: the three timestamps - ''' + """ kernel_start = float(time.time()) - float(util.uptime()) try: - delta_k_end = SystemctlReader('UserspaceTimestampMonotonic')\ - .parse_epoch_as_float() - delta_ci_s = SystemctlReader('InactiveExitTimestampMonotonic', - 'cloud-init-local').parse_epoch_as_float() + delta_k_end = SystemctlReader( + "UserspaceTimestampMonotonic" + ).parse_epoch_as_float() + delta_ci_s = SystemctlReader( + "InactiveExitTimestampMonotonic", "cloud-init-local" + ).parse_epoch_as_float() base_time = kernel_start status = SUCCESS_CODE # lxc based containers do not set their monotonic zero point to be when @@ -262,12 +266,13 @@ def gather_timestamps_using_systemd(): # in containers when https://github.com/lxc/lxcfs/issues/292 # is fixed, util.uptime() should be used instead of stat on try: - file_stat = os.stat('/proc/1/cmdline') + file_stat = os.stat("/proc/1/cmdline") kernel_start = file_stat.st_atime except OSError as err: - raise RuntimeError('Could not determine container boot ' - 'time from /proc/1/cmdline. ({})' - .format(err)) from err + raise RuntimeError( + "Could not determine container boot " + "time from /proc/1/cmdline. 
({})".format(err) + ) from err status = CONTAINER_CODE else: status = FAIL_CODE @@ -283,10 +288,14 @@ def gather_timestamps_using_systemd(): return status, kernel_start, kernel_end, cloudinit_sysd -def generate_records(events, blame_sort=False, - print_format="(%n) %d seconds in %I%D", - dump_files=False, log_datafiles=False): - ''' +def generate_records( + events, + blame_sort=False, + print_format="(%n) %d seconds in %I%D", + dump_files=False, + log_datafiles=False, +): + """ Take in raw events and create parent-child dependencies between events in order to order events in chronological order. @@ -298,9 +307,9 @@ def generate_records(events, blame_sort=False, :param log_datafiles: whether or not to log events generated :return: boot records ordered chronologically - ''' + """ - sorted_events = sorted(events, key=lambda x: x['timestamp']) + sorted_events = sorted(events, key=lambda x: x["timestamp"]) records = [] start_time = None total_time = 0.0 @@ -316,8 +325,8 @@ def generate_records(events, blame_sort=False, except IndexError: next_evt = None - if event_type(event) == 'start': - if event.get('name') in stages_seen: + if event_type(event) == "start": + if event.get("name") in stages_seen: records.append(total_time_record(total_time)) boot_records.append(records) records = [] @@ -331,25 +340,28 @@ def generate_records(events, blame_sort=False, # see if we have a pair if event_name(event) == event_name(next_evt): - if event_type(next_evt) == 'finish': - records.append(format_record(print_format, - event_record(start_time, - event, - next_evt))) + if event_type(next_evt) == "finish": + records.append( + format_record( + print_format, + event_record(start_time, event, next_evt), + ) + ) else: # This is a parent event - records.append("Starting stage: %s" % event.get('name')) + records.append("Starting stage: %s" % event.get("name")) unprocessed.append(event) - stages_seen.append(event.get('name')) + stages_seen.append(event.get("name")) continue else: prev_evt = unprocessed.pop() if event_name(event) == event_name(prev_evt): record = event_record(start_time, prev_evt, event) - records.append(format_record("Finished stage: " - "(%n) %d seconds", - record) + "\n") - total_time += record.get('delta') + records.append( + format_record("Finished stage: (%n) %d seconds", record) + + "\n" + ) + total_time += record.get("delta") else: # not a match, put it back unprocessed.append(prev_evt) @@ -360,7 +372,7 @@ def generate_records(events, blame_sort=False, def show_events(events, print_format): - ''' + """ A passthrough method that makes it easier to call generate_records() :param events: JSONs from dump that represents events taken from logs @@ -368,18 +380,18 @@ def show_events(events, print_format): and time taken by the event in one line :return: boot records ordered chronologically - ''' + """ return generate_records(events, print_format=print_format) def load_events_infile(infile): - ''' + """ Takes in a log file, read it, and convert to json. :param infile: The Log file to be read :return: json version of logfile, raw file - ''' + """ data = infile.read() try: return json.loads(data), data diff --git a/cloudinit/apport.py b/cloudinit/apport.py index aadc638f..92068aa9 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -2,127 +2,143 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-'''Cloud-init apport interface''' +"""Cloud-init apport interface""" try: from apport.hookutils import ( - attach_file, attach_root_command_outputs, root_command_output) + attach_file, + attach_root_command_outputs, + root_command_output, + ) + has_apport = True except ImportError: has_apport = False KNOWN_CLOUD_NAMES = [ - 'AliYun', - 'AltCloud', - 'Amazon - Ec2', - 'Azure', - 'Bigstep', - 'Brightbox', - 'CloudSigma', - 'CloudStack', - 'DigitalOcean', - 'E24Cloud', - 'GCE - Google Compute Engine', - 'Exoscale', - 'Hetzner Cloud', - 'IBM - (aka SoftLayer or BlueMix)', - 'LXD', - 'MAAS', - 'NoCloud', - 'OpenNebula', - 'OpenStack', - 'Oracle', - 'OVF', - 'RbxCloud - (HyperOne, Rootbox, Rubikon)', - 'OpenTelekomCloud', - 'SAP Converged Cloud', - 'Scaleway', - 'SmartOS', - 'UpCloud', - 'VMware', - 'Vultr', - 'ZStack', - 'Other' + "AliYun", + "AltCloud", + "Amazon - Ec2", + "Azure", + "Bigstep", + "Brightbox", + "CloudSigma", + "CloudStack", + "DigitalOcean", + "E24Cloud", + "GCE - Google Compute Engine", + "Exoscale", + "Hetzner Cloud", + "IBM - (aka SoftLayer or BlueMix)", + "LXD", + "MAAS", + "NoCloud", + "OpenNebula", + "OpenStack", + "Oracle", + "OVF", + "RbxCloud - (HyperOne, Rootbox, Rubikon)", + "OpenTelekomCloud", + "SAP Converged Cloud", + "Scaleway", + "SmartOS", + "UpCloud", + "VMware", + "Vultr", + "ZStack", + "Other", ] # Potentially clear text collected logs -CLOUDINIT_LOG = '/var/log/cloud-init.log' -CLOUDINIT_OUTPUT_LOG = '/var/log/cloud-init-output.log' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOG = "/var/log/cloud-init.log" +CLOUDINIT_OUTPUT_LOG = "/var/log/cloud-init-output.log" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def attach_cloud_init_logs(report, ui=None): - '''Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.''' - attach_root_command_outputs(report, { - 'cloud-init-log-warnings': - 'egrep -i "warn|error" /var/log/cloud-init.log', - 'cloud-init-output.log.txt': 'cat /var/log/cloud-init-output.log'}) + """Attach cloud-init logs and tarfile from 'cloud-init collect-logs'.""" + attach_root_command_outputs( + report, + { + "cloud-init-log-warnings": ( + 'egrep -i "warn|error" /var/log/cloud-init.log' + ), + "cloud-init-output.log.txt": "cat /var/log/cloud-init-output.log", + }, + ) root_command_output( - ['cloud-init', 'collect-logs', '-t', '/tmp/cloud-init-logs.tgz']) - attach_file(report, '/tmp/cloud-init-logs.tgz', 'logs.tgz') + ["cloud-init", "collect-logs", "-t", "/tmp/cloud-init-logs.tgz"] + ) + attach_file(report, "/tmp/cloud-init-logs.tgz", "logs.tgz") def attach_hwinfo(report, ui=None): - '''Optionally attach hardware info from lshw.''' + """Optionally attach hardware info from lshw.""" prompt = ( - 'Your device details (lshw) may be useful to developers when' - ' addressing this bug, but gathering it requires admin privileges.' - ' Would you like to include this info?') + "Your device details (lshw) may be useful to developers when" + " addressing this bug, but gathering it requires admin privileges." + " Would you like to include this info?" + ) if ui and ui.yesno(prompt): - attach_root_command_outputs(report, {'lshw.txt': 'lshw'}) + attach_root_command_outputs(report, {"lshw.txt": "lshw"}) def attach_cloud_info(report, ui=None): - '''Prompt for cloud details if available.''' + """Prompt for cloud details if available.""" if ui: - prompt = 'Is this machine running in a cloud environment?' + prompt = "Is this machine running in a cloud environment?" 
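
A note on attach_cloud_info() here: apport's ui.yesno() returns True/False, or None when the user cancels, and ui.choice() returns a list of selected indexes; that is why the code below indexes response[0] into KNOWN_CLOUD_NAMES. A sketch with a hypothetical stub UI (StubUI and the abridged name list are invented for illustration):

KNOWN_CLOUD_NAMES_ABRIDGED = ["AliYun", "Azure", "OpenStack", "Other"]

class StubUI:  # hypothetical stand-in for apport's ui object
    def yesno(self, prompt):
        return True  # pretend the user answered "yes"

    def choice(self, prompt, options):
        return [options.index("OpenStack")]  # list of selected indexes

report = {}
ui = StubUI()
if ui.yesno("Is this machine running in a cloud environment?"):
    response = ui.choice("Select the cloud vendor", KNOWN_CLOUD_NAMES_ABRIDGED)
    report["CloudName"] = (
        KNOWN_CLOUD_NAMES_ABRIDGED[response[0]] if response else "None"
    )
print(report)  # {'CloudName': 'OpenStack'}
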
response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - prompt = ('Please select the cloud vendor or environment in which' - ' this instance is running') + prompt = ( + "Please select the cloud vendor or environment in which" + " this instance is running" + ) response = ui.choice(prompt, KNOWN_CLOUD_NAMES) if response: - report['CloudName'] = KNOWN_CLOUD_NAMES[response[0]] + report["CloudName"] = KNOWN_CLOUD_NAMES[response[0]] else: - report['CloudName'] = 'None' + report["CloudName"] = "None" def attach_user_data(report, ui=None): - '''Optionally provide user-data if desired.''' + """Optionally provide user-data if desired.""" if ui: prompt = ( - 'Your user-data or cloud-config file can optionally be provided' - ' from {0} and could be useful to developers when addressing this' - ' bug. Do you wish to attach user-data to this bug?'.format( - USER_DATA_FILE)) + "Your user-data or cloud-config file can optionally be provided" + " from {0} and could be useful to developers when addressing this" + " bug. Do you wish to attach user-data to this bug?".format( + USER_DATA_FILE + ) + ) response = ui.yesno(prompt) if response is None: raise StopIteration # User cancelled if response: - attach_file(report, USER_DATA_FILE, 'user_data.txt') + attach_file(report, USER_DATA_FILE, "user_data.txt") def add_bug_tags(report): - '''Add any appropriate tags to the bug.''' - if 'JournalErrors' in report.keys(): - errors = report['JournalErrors'] - if 'Breaking ordering cycle' in errors: - report['Tags'] = 'systemd-ordering' + """Add any appropriate tags to the bug.""" + if "JournalErrors" in report.keys(): + errors = report["JournalErrors"] + if "Breaking ordering cycle" in errors: + report["Tags"] = "systemd-ordering" def add_info(report, ui): - '''This is an entry point to run cloud-init's apport functionality. + """This is an entry point to run cloud-init's apport functionality. Distros which want apport support will have a cloud-init package-hook at /usr/share/apport/package-hooks/cloud-init.py which defines an add_info function and returns the result of cloudinit.apport.add_info(report, ui). - ''' + """ if not has_apport: raise RuntimeError( - 'No apport imports discovered. Apport functionality disabled') + "No apport imports discovered. 
Apport functionality disabled" + ) attach_cloud_init_logs(report, ui) attach_hwinfo(report, ui) attach_cloud_info(report, ui) @@ -130,4 +146,5 @@ def add_info(report, ui): add_bug_tags(report) return True + # vi: ts=4 expandtab diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index 485ff92f..ae117fad 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -10,8 +10,9 @@ _DEF_PERMS = 0o644 LOG = logging.getLogger(__name__) -def write_file(filename, content, mode=_DEF_PERMS, - omode="wb", preserve_mode=False): +def write_file( + filename, content, mode=_DEF_PERMS, omode="wb", preserve_mode=False +): # open filename in mode 'omode', write content, set permissions to 'mode' if preserve_mode: @@ -23,12 +24,18 @@ def write_file(filename, content, mode=_DEF_PERMS, tf = None try: - tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename), - delete=False, mode=omode) + tf = tempfile.NamedTemporaryFile( + dir=os.path.dirname(filename), delete=False, mode=omode + ) LOG.debug( "Atomically writing to file %s (via temporary file %s) - %s: [%o]" " %d bytes/chars", - filename, tf.name, omode, mode, len(content)) + filename, + tf.name, + omode, + mode, + len(content), + ) tf.write(content) tf.close() os.chmod(tf.name, mode) @@ -42,7 +49,11 @@ def write_file(filename, content, mode=_DEF_PERMS, def write_json(filename, data, mode=_DEF_PERMS): # dump json representation of data to file filename. return write_file( - filename, json.dumps(data, indent=1, sort_keys=True) + "\n", - omode="w", mode=mode) + filename, + json.dumps(data, indent=1, sort_keys=True) + "\n", + omode="w", + mode=mode, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 7ae98e1c..91e48103 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -35,7 +35,8 @@ class Cloud(object): reporter = events.ReportEventStack( name="unnamed-cloud-reporter", description="unnamed-cloud-reporter", - reporting_enabled=False) + reporting_enabled=False, + ) self.reporter = reporter # If a 'user' manipulates logging or logging services @@ -56,8 +57,11 @@ class Cloud(object): def get_template_filename(self, name): fn = self.paths.template_tpl % (name) if not os.path.isfile(fn): - LOG.warning("No template found in %s for template named %s", - os.path.dirname(fn), name) + LOG.warning( + "No template found in %s for template named %s", + os.path.dirname(fn), + name, + ) return None return fn @@ -80,7 +84,8 @@ class Cloud(object): def get_hostname(self, fqdn=False, metadata_only=False): return self.datasource.get_hostname( - fqdn=fqdn, metadata_only=metadata_only) + fqdn=fqdn, metadata_only=metadata_only + ) def device_name_to_device(self, name): return self.datasource.device_name_to_device(name) @@ -94,4 +99,5 @@ class Cloud(object): def get_ipath(self, name=None): return self.paths.get_ipath(name) + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/clean.py b/cloudinit/cmd/clean.py index 3502dd56..0e1db118 100644 --- a/cloudinit/cmd/clean.py +++ b/cloudinit/cmd/clean.py @@ -10,9 +10,13 @@ import os import sys from cloudinit.stages import Init -from cloudinit.subp import (ProcessExecutionError, subp) +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.util import ( - del_dir, del_file, get_config_logfiles, is_link, error + del_dir, + del_file, + error, + get_config_logfiles, + is_link, ) @@ -27,18 +31,35 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='clean', - description=('Remove logs and artifacts so cloud-init 
re-runs on ' - 'a clean system')) + prog="clean", + description=( + "Remove logs and artifacts so cloud-init re-runs on " + "a clean system" + ), + ) parser.add_argument( - '-l', '--logs', action='store_true', default=False, dest='remove_logs', - help='Remove cloud-init logs.') + "-l", + "--logs", + action="store_true", + default=False, + dest="remove_logs", + help="Remove cloud-init logs.", + ) parser.add_argument( - '-r', '--reboot', action='store_true', default=False, - help='Reboot system after logs are cleaned so cloud-init re-runs.') + "-r", + "--reboot", + action="store_true", + default=False, + help="Reboot system after logs are cleaned so cloud-init re-runs.", + ) parser.add_argument( - '-s', '--seed', action='store_true', default=False, dest='remove_seed', - help='Remove cloud-init seed directory /var/lib/cloud/seed.') + "-s", + "--seed", + action="store_true", + default=False, + dest="remove_seed", + help="Remove cloud-init seed directory /var/lib/cloud/seed.", + ) return parser @@ -59,8 +80,8 @@ def remove_artifacts(remove_logs, remove_seed=False): if not os.path.isdir(init.paths.cloud_dir): return 0 # Artifacts dir already cleaned - seed_path = os.path.join(init.paths.cloud_dir, 'seed') - for path in glob.glob('%s/*' % init.paths.cloud_dir): + seed_path = os.path.join(init.paths.cloud_dir, "seed") + for path in glob.glob("%s/*" % init.paths.cloud_dir): if path == seed_path and not remove_seed: continue try: @@ -69,7 +90,7 @@ def remove_artifacts(remove_logs, remove_seed=False): else: del_file(path) except OSError as e: - error('Could not remove {0}: {1}'.format(path, str(e))) + error("Could not remove {0}: {1}".format(path, str(e))) return 1 return 0 @@ -78,13 +99,15 @@ def handle_clean_args(name, args): """Handle calls to 'cloud-init clean' as a subcommand.""" exit_code = remove_artifacts(args.remove_logs, args.remove_seed) if exit_code == 0 and args.reboot: - cmd = ['shutdown', '-r', 'now'] + cmd = ["shutdown", "-r", "now"] try: subp(cmd, capture=False) except ProcessExecutionError as e: error( 'Could not reboot this system using "{0}": {1}'.format( - cmd, str(e))) + cmd, str(e) + ) + ) exit_code = 1 return exit_code @@ -92,10 +115,10 @@ def handle_clean_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - sys.exit(handle_clean_args('clean', parser.parse_args())) + sys.exit(handle_clean_args("clean", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py index 0cdc9675..b92b03a8 100755 --- a/cloudinit/cmd/cloud_id.py +++ b/cloudinit/cmd/cloud_id.py @@ -6,13 +6,16 @@ import argparse import json import sys -from cloudinit.util import error from cloudinit.sources import ( - INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) + INSTANCE_JSON_FILE, + METADATA_UNKNOWN, + canonical_cloud_id, +) +from cloudinit.util import error -DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE +DEFAULT_INSTANCE_JSON = "/run/cloud-init/%s" % INSTANCE_JSON_FILE -NAME = 'cloud-id' +NAME = "cloud-id" def get_parser(parser=None): @@ -27,17 +30,30 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( prog=NAME, - description='Report the canonical cloud-id for this instance') + description="Report the canonical cloud-id for this instance", + ) parser.add_argument( - '-j', '--json', action='store_true', default=False, - help='Report all standardized cloud-id information as json.') + 
"-j", + "--json", + action="store_true", + default=False, + help="Report all standardized cloud-id information as json.", + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help='Report extended cloud-id information as tab-delimited string.') + "-l", + "--long", + action="store_true", + default=False, + help="Report extended cloud-id information as tab-delimited string.", + ) parser.add_argument( - '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON, - help=('Path to instance-data.json file. Default is %s' % - DEFAULT_INSTANCE_JSON)) + "-i", + "--instance-data", + type=str, + default=DEFAULT_INSTANCE_JSON, + help="Path to instance-data.json file. Default is %s" + % DEFAULT_INSTANCE_JSON, + ) return parser @@ -53,24 +69,28 @@ def handle_args(name, args): except IOError: return error( "File not found '%s'. Provide a path to instance data json file" - ' using --instance-data' % args.instance_data) + " using --instance-data" % args.instance_data + ) except ValueError as e: return error( - "File '%s' is not valid json. %s" % (args.instance_data, e)) - v1 = instance_data.get('v1', {}) + "File '%s' is not valid json. %s" % (args.instance_data, e) + ) + v1 = instance_data.get("v1", {}) cloud_id = canonical_cloud_id( - v1.get('cloud_name', METADATA_UNKNOWN), - v1.get('region', METADATA_UNKNOWN), - v1.get('platform', METADATA_UNKNOWN)) + v1.get("cloud_name", METADATA_UNKNOWN), + v1.get("region", METADATA_UNKNOWN), + v1.get("platform", METADATA_UNKNOWN), + ) if args.json: - v1['cloud_id'] = cloud_id - response = json.dumps( # Pretty, sorted json - v1, indent=1, sort_keys=True, separators=(',', ': ')) + v1["cloud_id"] = cloud_id + response = json.dumps( # Pretty, sorted json + v1, indent=1, sort_keys=True, separators=(",", ": ") + ) elif args.long: - response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN)) + response = "%s\t%s" % (cloud_id, v1.get("region", METADATA_UNKNOWN)) else: response = cloud_id - sys.stdout.write('%s\n' % response) + sys.stdout.write("%s\n" % response) return 0 @@ -80,7 +100,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/__init__.py b/cloudinit/cmd/devel/__init__.py index 3ae28b69..ead5f7a9 100644 --- a/cloudinit/cmd/devel/__init__.py +++ b/cloudinit/cmd/devel/__init__.py @@ -11,7 +11,7 @@ from cloudinit.stages import Init def addLogHandlerCLI(logger, log_level): """Add a commandline logging handler to emit messages to stderr.""" - formatter = logging.Formatter('%(levelname)s: %(message)s') + formatter = logging.Formatter("%(levelname)s: %(message)s") log.setupBasicLogging(log_level, formatter=formatter) return logger @@ -22,4 +22,5 @@ def read_cfg_paths(): init.read_cfg() return init.paths + # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/hotplug_hook.py b/cloudinit/cmd/devel/hotplug_hook.py index f6f36a00..a9be0379 100644 --- a/cloudinit/cmd/devel/hotplug_hook.py +++ b/cloudinit/cmd/devel/hotplug_hook.py @@ -6,20 +6,17 @@ import os import sys import time -from cloudinit import log -from cloudinit import reporting -from cloudinit import stages +from cloudinit import log, reporting, stages from cloudinit.event import EventScope, EventType from cloudinit.net import activators, read_sys_net_safe from cloudinit.net.network_state import parse_net_config_data from cloudinit.reporting import events -from cloudinit.stages import Init from cloudinit.sources import DataSource # noqa: F401 from 
cloudinit.sources import DataSourceNotFoundException - +from cloudinit.stages import Init LOG = log.getLogger(__name__) -NAME = 'hotplug-hook' +NAME = "hotplug-hook" def get_parser(parser=None): @@ -35,33 +32,38 @@ def get_parser(parser=None): parser.description = __doc__ parser.add_argument( - "-s", "--subsystem", required=True, + "-s", + "--subsystem", + required=True, help="subsystem to act on", - choices=['net'] + choices=["net"], ) subparsers = parser.add_subparsers( - title='Hotplug Action', - dest='hotplug_action' + title="Hotplug Action", dest="hotplug_action" ) subparsers.required = True subparsers.add_parser( - 'query', - help='query if hotplug is enabled for given subsystem' + "query", help="query if hotplug is enabled for given subsystem" ) parser_handle = subparsers.add_parser( - 'handle', help='handle the hotplug event') + "handle", help="handle the hotplug event" + ) parser_handle.add_argument( - "-d", "--devpath", required=True, + "-d", + "--devpath", + required=True, metavar="PATH", - help="sysfs path to hotplugged device" + help="sysfs path to hotplugged device", ) parser_handle.add_argument( - "-u", "--udevaction", required=True, + "-u", + "--udevaction", + required=True, help="action to take", - choices=['add', 'remove'] + choices=["add", "remove"], ) return parser @@ -90,27 +92,29 @@ class UeventHandler(abc.ABC): def detect_hotplugged_device(self): detect_presence = None - if self.action == 'add': + if self.action == "add": detect_presence = True - elif self.action == 'remove': + elif self.action == "remove": detect_presence = False else: - raise ValueError('Unknown action: %s' % self.action) + raise ValueError("Unknown action: %s" % self.action) if detect_presence != self.device_detected(): raise RuntimeError( - 'Failed to detect %s in updated metadata' % self.id) + "Failed to detect %s in updated metadata" % self.id + ) def success(self): return self.success_fn() def update_metadata(self): - result = self.datasource.update_metadata_if_supported([ - EventType.HOTPLUG]) + result = self.datasource.update_metadata_if_supported( + [EventType.HOTPLUG] + ) if not result: raise RuntimeError( - 'Datasource %s not updated for ' - 'event %s' % (self.datasource, EventType.HOTPLUG) + "Datasource %s not updated for event %s" + % (self.datasource, EventType.HOTPLUG) ) return result @@ -118,7 +122,7 @@ class UeventHandler(abc.ABC): class NetHandler(UeventHandler): def __init__(self, datasource, devpath, action, success_fn): # convert devpath to mac address - id = read_sys_net_safe(os.path.basename(devpath), 'address') + id = read_sys_net_safe(os.path.basename(devpath), "address") super().__init__(id, datasource, devpath, action, success_fn) def apply(self): @@ -128,14 +132,16 @@ class NetHandler(UeventHandler): ) interface_name = os.path.basename(self.devpath) activator = activators.select_activator() - if self.action == 'add': + if self.action == "add": if not activator.bring_up_interface(interface_name): raise RuntimeError( - 'Failed to bring up device: {}'.format(self.devpath)) - elif self.action == 'remove': + "Failed to bring up device: {}".format(self.devpath) + ) + elif self.action == "remove": if not activator.bring_down_interface(interface_name): raise RuntimeError( - 'Failed to bring down device: {}'.format(self.devpath)) + "Failed to bring down device: {}".format(self.devpath) + ) @property def config(self): @@ -144,15 +150,16 @@ class NetHandler(UeventHandler): def device_detected(self) -> bool: netstate = parse_net_config_data(self.config) found = [ - iface for iface in 
netstate.iter_interfaces() - if iface.get('mac_address') == self.id + iface + for iface in netstate.iter_interfaces() + if iface.get("mac_address") == self.id ] - LOG.debug('Ifaces with ID=%s : %s', self.id, found) + LOG.debug("Ifaces with ID=%s : %s", self.id, found) return len(found) > 0 SUBSYSTEM_PROPERTES_MAP = { - 'net': (NetHandler, EventScope.NETWORK), + "net": (NetHandler, EventScope.NETWORK), } @@ -161,66 +168,65 @@ def is_enabled(hotplug_init, subsystem): scope = SUBSYSTEM_PROPERTES_MAP[subsystem][1] except KeyError as e: raise Exception( - 'hotplug-hook: cannot handle events for subsystem: {}'.format( - subsystem) + "hotplug-hook: cannot handle events for subsystem: {}".format( + subsystem + ) ) from e return stages.update_event_enabled( datasource=hotplug_init.datasource, cfg=hotplug_init.cfg, event_source_type=EventType.HOTPLUG, - scope=scope + scope=scope, ) def initialize_datasource(hotplug_init, subsystem): - LOG.debug('Fetching datasource') + LOG.debug("Fetching datasource") datasource = hotplug_init.fetch(existing="trust") if not datasource.get_supported_events([EventType.HOTPLUG]): - LOG.debug('hotplug not supported for event of type %s', subsystem) + LOG.debug("hotplug not supported for event of type %s", subsystem) return if not is_enabled(hotplug_init, subsystem): - LOG.debug('hotplug not enabled for event of type %s', subsystem) + LOG.debug("hotplug not enabled for event of type %s", subsystem) return return datasource -def handle_hotplug( - hotplug_init: Init, devpath, subsystem, udevaction -): +def handle_hotplug(hotplug_init: Init, devpath, subsystem, udevaction): datasource = initialize_datasource(hotplug_init, subsystem) if not datasource: return handler_cls = SUBSYSTEM_PROPERTES_MAP[subsystem][0] - LOG.debug('Creating %s event handler', subsystem) + LOG.debug("Creating %s event handler", subsystem) event_handler = handler_cls( datasource=datasource, devpath=devpath, action=udevaction, - success_fn=hotplug_init._write_to_cache + success_fn=hotplug_init._write_to_cache, ) # type: UeventHandler wait_times = [1, 3, 5, 10, 30] for attempt, wait in enumerate(wait_times): LOG.debug( - 'subsystem=%s update attempt %s/%s', + "subsystem=%s update attempt %s/%s", subsystem, attempt, - len(wait_times) + len(wait_times), ) try: - LOG.debug('Refreshing metadata') + LOG.debug("Refreshing metadata") event_handler.update_metadata() - LOG.debug('Detecting device in updated metadata') + LOG.debug("Detecting device in updated metadata") event_handler.detect_hotplugged_device() - LOG.debug('Applying config change') + LOG.debug("Applying config change") event_handler.apply() - LOG.debug('Updating cache') + LOG.debug("Updating cache") event_handler.success() break except Exception as e: - LOG.debug('Exception while processing hotplug event. %s', e) + LOG.debug("Exception while processing hotplug event. 
%s", e) time.sleep(wait) last_exception = e else: @@ -238,31 +244,33 @@ def handle_args(name, args): hotplug_init.read_cfg() log.setupLogging(hotplug_init.cfg) - if 'reporting' in hotplug_init.cfg: - reporting.update_configuration(hotplug_init.cfg.get('reporting')) + if "reporting" in hotplug_init.cfg: + reporting.update_configuration(hotplug_init.cfg.get("reporting")) # Logging isn't going to be setup until now LOG.debug( - '%s called with the following arguments: {' - 'hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}', + "%s called with the following arguments: {" + "hotplug_action: %s, subsystem: %s, udevaction: %s, devpath: %s}", name, args.hotplug_action, args.subsystem, - args.udevaction if 'udevaction' in args else None, - args.devpath if 'devpath' in args else None, + args.udevaction if "udevaction" in args else None, + args.devpath if "devpath" in args else None, ) with hotplug_reporter: try: - if args.hotplug_action == 'query': + if args.hotplug_action == "query": try: datasource = initialize_datasource( - hotplug_init, args.subsystem) + hotplug_init, args.subsystem + ) except DataSourceNotFoundException: print( "Unable to determine hotplug state. No datasource " - "detected") + "detected" + ) sys.exit(1) - print('enabled' if datasource else 'disabled') + print("enabled" if datasource else "disabled") else: handle_hotplug( hotplug_init=hotplug_init, @@ -271,13 +279,13 @@ def handle_args(name, args): udevaction=args.udevaction, ) except Exception: - LOG.exception('Received fatal exception handling hotplug!') + LOG.exception("Received fatal exception handling hotplug!") raise - LOG.debug('Exiting hotplug handler') + LOG.debug("Exiting hotplug handler") reporting.flush_events() -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py index 31ade73d..d54b809a 100644 --- a/cloudinit/cmd/devel/logs.py +++ b/cloudinit/cmd/devel/logs.py @@ -5,20 +5,19 @@ """Define 'collect-logs' utility and handler to include in cloud-init cmd.""" import argparse -from datetime import datetime import os import shutil import sys +from datetime import datetime from cloudinit.sources import INSTANCE_JSON_SENSITIVE_FILE +from cloudinit.subp import ProcessExecutionError, subp from cloudinit.temp_utils import tempdir -from cloudinit.subp import (ProcessExecutionError, subp) -from cloudinit.util import (chdir, copy, ensure_dir, write_file) +from cloudinit.util import chdir, copy, ensure_dir, write_file - -CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log'] -CLOUDINIT_RUN_DIR = '/run/cloud-init' -USER_DATA_FILE = '/var/lib/cloud/instance/user-data.txt' # Optional +CLOUDINIT_LOGS = ["/var/log/cloud-init.log", "/var/log/cloud-init-output.log"] +CLOUDINIT_RUN_DIR = "/run/cloud-init" +USER_DATA_FILE = "/var/lib/cloud/instance/user-data.txt" # Optional def get_parser(parser=None): @@ -32,26 +31,44 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='collect-logs', - description='Collect and tar all cloud-init debug info') - parser.add_argument('--verbose', '-v', action='count', default=0, - dest='verbosity', help="Be more verbose.") + prog="collect-logs", + description="Collect and tar all cloud-init debug info", + ) + parser.add_argument( + "--verbose", + "-v", + action="count", + default=0, + dest="verbosity", + help="Be more verbose.", + ) parser.add_argument( - "--tarfile", '-t', default='cloud-init.tar.gz', 
- help=('The tarfile to create containing all collected logs.' - ' Default: cloud-init.tar.gz')) + "--tarfile", + "-t", + default="cloud-init.tar.gz", + help=( + "The tarfile to create containing all collected logs." + " Default: cloud-init.tar.gz" + ), + ) parser.add_argument( - "--include-userdata", '-u', default=False, action='store_true', - dest='userdata', help=( - 'Optionally include user-data from {0} which could contain' - ' sensitive information.'.format(USER_DATA_FILE))) + "--include-userdata", + "-u", + default=False, + action="store_true", + dest="userdata", + help=( + "Optionally include user-data from {0} which could contain" + " sensitive information.".format(USER_DATA_FILE) + ), + ) return parser def _copytree_rundir_ignore_files(curdir, files): """Return a list of files to ignore for /run/cloud-init directory""" ignored_files = [ - 'hook-hotplug-cmd', # named pipe for hotplug + "hook-hotplug-cmd", # named pipe for hotplug ] if os.getuid() != 0: # Ignore root-permissioned files @@ -94,52 +111,67 @@ def collect_logs(tarfile, include_userdata, verbosity=0): if include_userdata and os.getuid() != 0: sys.stderr.write( "To include userdata, root user is required." - " Try sudo cloud-init collect-logs\n") + " Try sudo cloud-init collect-logs\n" + ) return 1 tarfile = os.path.abspath(tarfile) - date = datetime.utcnow().date().strftime('%Y-%m-%d') - log_dir = 'cloud-init-logs-{0}'.format(date) - with tempdir(dir='/tmp') as tmp_dir: + date = datetime.utcnow().date().strftime("%Y-%m-%d") + log_dir = "cloud-init-logs-{0}".format(date) + with tempdir(dir="/tmp") as tmp_dir: log_dir = os.path.join(tmp_dir, log_dir) version = _write_command_output_to_file( - ['cloud-init', '--version'], - os.path.join(log_dir, 'version'), - "cloud-init --version", verbosity) + ["cloud-init", "--version"], + os.path.join(log_dir, "version"), + "cloud-init --version", + verbosity, + ) dpkg_ver = _write_command_output_to_file( - ['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'], - os.path.join(log_dir, 'dpkg-version'), - "dpkg version", verbosity) + ["dpkg-query", "--show", "-f=${Version}\n", "cloud-init"], + os.path.join(log_dir, "dpkg-version"), + "dpkg version", + verbosity, + ) if not version: version = dpkg_ver if dpkg_ver else "not-available" _debug("collected cloud-init version: %s\n" % version, 1, verbosity) _write_command_output_to_file( - ['dmesg'], os.path.join(log_dir, 'dmesg.txt'), - "dmesg output", verbosity) + ["dmesg"], + os.path.join(log_dir, "dmesg.txt"), + "dmesg output", + verbosity, + ) _write_command_output_to_file( - ['journalctl', '--boot=0', '-o', 'short-precise'], - os.path.join(log_dir, 'journal.txt'), - "systemd journal of current boot", verbosity) + ["journalctl", "--boot=0", "-o", "short-precise"], + os.path.join(log_dir, "journal.txt"), + "systemd journal of current boot", + verbosity, + ) for log in CLOUDINIT_LOGS: _collect_file(log, log_dir, verbosity) if include_userdata: _collect_file(USER_DATA_FILE, log_dir, verbosity) - run_dir = os.path.join(log_dir, 'run') + run_dir = os.path.join(log_dir, "run") ensure_dir(run_dir) if os.path.exists(CLOUDINIT_RUN_DIR): try: - shutil.copytree(CLOUDINIT_RUN_DIR, - os.path.join(run_dir, 'cloud-init'), - ignore=_copytree_rundir_ignore_files) + shutil.copytree( + CLOUDINIT_RUN_DIR, + os.path.join(run_dir, "cloud-init"), + ignore=_copytree_rundir_ignore_files, + ) except shutil.Error as e: sys.stderr.write("Failed collecting file(s) due to error:\n") - sys.stderr.write(str(e) + '\n') + sys.stderr.write(str(e) + "\n") 
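
A detail worth spelling out in collect_logs(): the tar command a few lines below runs under chdir(tmp_dir), so the log directory must be passed as a path relative to tmp_dir; that is what the replace() achieves, keeping cloud-init-logs-<date>/ as the archive's top-level entry. A toy illustration (the tempdir name is hypothetical):

tmp_dir = "/tmp/tmp1a2b3c"  # hypothetical tempdir name
log_dir = tmp_dir + "/cloud-init-logs-2021-11-25"
print(log_dir.replace(tmp_dir + "/", ""))  # cloud-init-logs-2021-11-25
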
_debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity) else: - _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1, - verbosity) + _debug( + "directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, + 1, + verbosity, + ) with chdir(tmp_dir): - subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')]) + subp(["tar", "czvf", tarfile, log_dir.replace(tmp_dir + "/", "")]) sys.stderr.write("Wrote %s\n" % tarfile) return 0 @@ -152,10 +184,10 @@ def handle_collect_logs_args(name, args): def main(): """Tool to collect and tar all cloud-init related logs.""" parser = get_parser() - return handle_collect_logs_args('collect-logs', parser.parse_args()) + return handle_collect_logs_args("collect-logs", parser.parse_args()) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) # vi: ts=4 expandtab diff --git a/cloudinit/cmd/devel/make_mime.py b/cloudinit/cmd/devel/make_mime.py index 4e6a5778..a7493c74 100755 --- a/cloudinit/cmd/devel/make_mime.py +++ b/cloudinit/cmd/devel/make_mime.py @@ -9,19 +9,22 @@ from email.mime.text import MIMEText from cloudinit import log from cloudinit.handlers import INCLUSION_TYPES_MAP + from . import addLogHandlerCLI -NAME = 'make-mime' +NAME = "make-mime" LOG = log.getLogger(NAME) -EPILOG = ("Example: make-mime -a config.yaml:cloud-config " - "-a script.sh:x-shellscript > user-data") +EPILOG = ( + "Example: make-mime -a config.yaml:cloud-config " + "-a script.sh:x-shellscript > user-data" +) def file_content_type(text): - """ Return file content type by reading the first line of the input. """ + """Return file content type by reading the first line of the input.""" try: filename, content_type = text.split(":", 1) - return (open(filename, 'r'), filename, content_type.strip()) + return (open(filename, "r"), filename, content_type.strip()) except ValueError as e: raise argparse.ArgumentError( text, "Invalid value for %r" % (text) @@ -41,26 +44,43 @@ def get_parser(parser=None): # update the parser's doc and add an epilog to show an example parser.description = __doc__ parser.epilog = EPILOG - parser.add_argument("-a", "--attach", dest="files", type=file_content_type, - action='append', default=[], - metavar=":", - help=("attach the given file as the specified " - "content-type")) - parser.add_argument('-l', '--list-types', action='store_true', - default=False, - help='List support cloud-init content types.') - parser.add_argument('-f', '--force', action='store_true', - default=False, - help='Ignore unknown content-type warnings') + parser.add_argument( + "-a", + "--attach", + dest="files", + type=file_content_type, + action="append", + default=[], + metavar=":", + help="attach the given file as the specified content-type", + ) + parser.add_argument( + "-l", + "--list-types", + action="store_true", + default=False, + help="List support cloud-init content types.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + help="Ignore unknown content-type warnings", + ) return parser def get_content_types(strip_prefix=False): - """ Return a list of cloud-init supported content types. Optionally - strip out the leading 'text/' of the type if strip_prefix=True. + """Return a list of cloud-init supported content types. Optionally + strip out the leading 'text/' of the type if strip_prefix=True. 
""" - return sorted([ctype.replace("text/", "") if strip_prefix else ctype - for ctype in INCLUSION_TYPES_MAP.values()]) + return sorted( + [ + ctype.replace("text/", "") if strip_prefix else ctype + for ctype in INCLUSION_TYPES_MAP.values() + ] + ) def handle_args(name, args): @@ -82,14 +102,16 @@ def handle_args(name, args): for i, (fh, filename, format_type) in enumerate(args.files): contents = fh.read() sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) - sub_message.add_header('Content-Disposition', - 'attachment; filename="%s"' % (filename)) + sub_message.add_header( + "Content-Disposition", 'attachment; filename="%s"' % (filename) + ) content_type = sub_message.get_content_type().lower() if content_type not in get_content_types(): level = "WARNING" if args.force else "ERROR" - msg = (level + ": content type %r for attachment %s " - "may be incorrect!") % (content_type, i + 1) - sys.stderr.write(msg + '\n') + msg = ( + level + ": content type %r for attachment %s may be incorrect!" + ) % (content_type, i + 1) + sys.stderr.write(msg + "\n") errors.append(msg) sub_messages.append(sub_message) if len(errors) and not args.force: @@ -104,10 +126,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index f4a98e5e..18b1e7ff 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -6,15 +6,13 @@ import json import os import sys -from cloudinit.sources.helpers import openstack +from cloudinit import distros, log, safeyaml +from cloudinit.net import eni, netplan, network_state, networkd, sysconfig from cloudinit.sources import DataSourceAzure as azure from cloudinit.sources import DataSourceOVF as ovf +from cloudinit.sources.helpers import openstack -from cloudinit import distros, safeyaml -from cloudinit.net import eni, netplan, networkd, network_state, sysconfig -from cloudinit import log - -NAME = 'net-convert' +NAME = "net-convert" def get_parser(parser=None): @@ -27,33 +25,59 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) - parser.add_argument("-p", "--network-data", type=open, - metavar="PATH", required=True, - help="The network configuration to read") - parser.add_argument("-k", "--kind", - choices=['eni', 'network_data.json', 'yaml', - 'azure-imds', 'vmware-imc'], - required=True, - help="The format of the given network config") - parser.add_argument("-d", "--directory", - metavar="PATH", - help="directory to place output in", - required=True) - parser.add_argument("-D", "--distro", - choices=[item for sublist in - distros.OSFAMILIES.values() - for item in sublist], - required=True) - parser.add_argument("-m", "--mac", - metavar="name,mac", - action='append', - help="interface name to mac mapping") - parser.add_argument("--debug", action='store_true', - help='enable debug logging to stderr.') - parser.add_argument("-O", "--output-kind", - choices=['eni', 'netplan', 'networkd', 'sysconfig'], - required=True, - help="The network config format to emit") + parser.add_argument( + "-p", + "--network-data", + type=open, + metavar="PATH", + required=True, + help="The network configuration to read", + ) + parser.add_argument( + "-k", + "--kind", + choices=[ + "eni", + "network_data.json", + "yaml", + "azure-imds", + 
"vmware-imc", + ], + required=True, + help="The format of the given network config", + ) + parser.add_argument( + "-d", + "--directory", + metavar="PATH", + help="directory to place output in", + required=True, + ) + parser.add_argument( + "-D", + "--distro", + choices=[ + item for sublist in distros.OSFAMILIES.values() for item in sublist + ], + required=True, + ) + parser.add_argument( + "-m", + "--mac", + metavar="name,mac", + action="append", + help="interface name to mac mapping", + ) + parser.add_argument( + "--debug", action="store_true", help="enable debug logging to stderr." + ) + parser.add_argument( + "-O", + "--output-kind", + choices=["eni", "netplan", "networkd", "sysconfig"], + required=True, + help="The network config format to emit", + ) return parser @@ -81,59 +105,68 @@ def handle_args(name, args): pre_ns = eni.convert_eni_data(net_data) elif args.kind == "yaml": pre_ns = safeyaml.load(net_data) - if 'network' in pre_ns: - pre_ns = pre_ns.get('network') + if "network" in pre_ns: + pre_ns = pre_ns.get("network") if args.debug: - sys.stderr.write('\n'.join( - ["Input YAML", safeyaml.dumps(pre_ns), ""])) - elif args.kind == 'network_data.json': + sys.stderr.write( + "\n".join(["Input YAML", safeyaml.dumps(pre_ns), ""]) + ) + elif args.kind == "network_data.json": pre_ns = openstack.convert_net_json( - json.loads(net_data), known_macs=known_macs) - elif args.kind == 'azure-imds': + json.loads(net_data), known_macs=known_macs + ) + elif args.kind == "azure-imds": pre_ns = azure.parse_network_config(json.loads(net_data)) - elif args.kind == 'vmware-imc': + elif args.kind == "vmware-imc": config = ovf.Config(ovf.ConfigFile(args.network_data.name)) pre_ns = ovf.get_network_config_from_conf(config, False) ns = network_state.parse_net_config_data(pre_ns) if args.debug: - sys.stderr.write('\n'.join( - ["", "Internal State", safeyaml.dumps(ns), ""])) + sys.stderr.write( + "\n".join(["", "Internal State", safeyaml.dumps(ns), ""]) + ) distro_cls = distros.fetch(args.distro) distro = distro_cls(args.distro, {}, None) config = {} if args.output_kind == "eni": r_cls = eni.Renderer - config = distro.renderer_configs.get('eni') + config = distro.renderer_configs.get("eni") elif args.output_kind == "netplan": r_cls = netplan.Renderer - config = distro.renderer_configs.get('netplan') + config = distro.renderer_configs.get("netplan") # don't run netplan generate/apply - config['postcmds'] = False + config["postcmds"] = False # trim leading slash - config['netplan_path'] = config['netplan_path'][1:] + config["netplan_path"] = config["netplan_path"][1:] # enable some netplan features - config['features'] = ['dhcp-use-domains', 'ipv6-mtu'] + config["features"] = ["dhcp-use-domains", "ipv6-mtu"] elif args.output_kind == "networkd": r_cls = networkd.Renderer - config = distro.renderer_configs.get('networkd') + config = distro.renderer_configs.get("networkd") elif args.output_kind == "sysconfig": r_cls = sysconfig.Renderer - config = distro.renderer_configs.get('sysconfig') + config = distro.renderer_configs.get("sysconfig") else: raise RuntimeError("Invalid output_kind") r = r_cls(config=config) - sys.stderr.write(''.join([ - "Read input format '%s' from '%s'.\n" % ( - args.kind, args.network_data.name), - "Wrote output format '%s' to '%s'\n" % ( - args.output_kind, args.directory)]) + "\n") + sys.stderr.write( + "".join( + [ + "Read input format '%s' from '%s'.\n" + % (args.kind, args.network_data.name), + "Wrote output format '%s' to '%s'\n" + % (args.output_kind, args.directory), + ] + ) + + 
"\n" + ) r.render_network_state(network_state=ns, target=args.directory) -if __name__ == '__main__': +if __name__ == "__main__": args = get_parser().parse_args() handle_args(NAME, args) diff --git a/cloudinit/cmd/devel/parser.py b/cloudinit/cmd/devel/parser.py index be304630..76b16c2e 100644 --- a/cloudinit/cmd/devel/parser.py +++ b/cloudinit/cmd/devel/parser.py @@ -5,33 +5,47 @@ """Define 'devel' subcommand argument parsers to include in cloud-init cmd.""" import argparse + from cloudinit.config import schema -from . import hotplug_hook -from . import net_convert -from . import render -from . import make_mime +from . import hotplug_hook, make_mime, net_convert, render def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser( - prog='cloudinit-devel', - description='Run development cloud-init tools') - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + prog="cloudinit-devel", + description="Run development cloud-init tools", + ) + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True subcmds = [ - (hotplug_hook.NAME, hotplug_hook.__doc__, - hotplug_hook.get_parser, hotplug_hook.handle_args), - ('schema', 'Validate cloud-config files for document schema', - schema.get_parser, schema.handle_schema_args), - (net_convert.NAME, net_convert.__doc__, - net_convert.get_parser, net_convert.handle_args), - (render.NAME, render.__doc__, - render.get_parser, render.handle_args), - (make_mime.NAME, make_mime.__doc__, - make_mime.get_parser, make_mime.handle_args), + ( + hotplug_hook.NAME, + hotplug_hook.__doc__, + hotplug_hook.get_parser, + hotplug_hook.handle_args, + ), + ( + "schema", + "Validate cloud-config files for document schema", + schema.get_parser, + schema.handle_schema_args, + ), + ( + net_convert.NAME, + net_convert.__doc__, + net_convert.get_parser, + net_convert.handle_args, + ), + (render.NAME, render.__doc__, render.get_parser, render.handle_args), + ( + make_mime.NAME, + make_mime.__doc__, + make_mime.get_parser, + make_mime.handle_args, + ), ] for (subcmd, helpmsg, get_parser, handler) in subcmds: parser = subparsers.add_parser(subcmd, help=helpmsg) diff --git a/cloudinit/cmd/devel/render.py b/cloudinit/cmd/devel/render.py index 1090aa16..2f9a22a8 100755 --- a/cloudinit/cmd/devel/render.py +++ b/cloudinit/cmd/devel/render.py @@ -6,12 +6,13 @@ import argparse import os import sys -from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit import log +from cloudinit.handlers.jinja_template import render_jinja_payload_from_file from cloudinit.sources import INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE + from . import addLogHandlerCLI, read_cfg_paths -NAME = 'render' +NAME = "render" LOG = log.getLogger(NAME) @@ -27,13 +28,24 @@ def get_parser(parser=None): if not parser: parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument( - 'user_data', type=str, help='Path to the user-data file to render') + "user_data", type=str, help="Path to the user-data file to render" + ) + parser.add_argument( + "-i", + "--instance-data", + type=str, + help=( + "Optional path to instance-data.json file. Defaults to" + " /run/cloud-init/instance-data.json" + ), + ) parser.add_argument( - '-i', '--instance-data', type=str, - help=('Optional path to instance-data.json file. 
Defaults to' - ' /run/cloud-init/instance-data.json')) - parser.add_argument('-d', '--debug', action='store_true', default=False, - help='Add verbose messages during template render') + "-d", + "--debug", + action="store_true", + default=False, + help="Add verbose messages during template render", + ) return parser @@ -54,34 +66,38 @@ def handle_args(name, args): redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: instance_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if not os.path.exists(instance_data_fn): LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - instance_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + instance_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: instance_data_fn = redacted_data_fn if not os.path.exists(instance_data_fn): - LOG.error('Missing instance-data.json file: %s', instance_data_fn) + LOG.error("Missing instance-data.json file: %s", instance_data_fn) return 1 try: with open(args.user_data) as stream: user_data = stream.read() except IOError: - LOG.error('Missing user-data file: %s', args.user_data) + LOG.error("Missing user-data file: %s", args.user_data) return 1 try: rendered_payload = render_jinja_payload_from_file( - payload=user_data, payload_fn=args.user_data, + payload=user_data, + payload_fn=args.user_data, instance_data_file=instance_data_fn, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) except RuntimeError as e: - LOG.error('Cannot render from instance data: %s', str(e)) + LOG.error("Cannot render from instance data: %s", str(e)) return 1 if not rendered_payload: - LOG.error('Unable to render user-data file: %s', args.user_data) + LOG.error("Unable to render user-data file: %s", args.user_data) return 1 sys.stdout.write(rendered_payload) return 0 @@ -89,10 +105,10 @@ def handle_args(name, args): def main(): args = get_parser().parse_args() - return(handle_args(NAME, args)) + return handle_args(NAME, args) -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index 63186d34..e67edbc3 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -19,6 +19,7 @@ import time import traceback from cloudinit import patcher + patcher.patch_logging() from cloudinit import log as logging @@ -34,8 +35,7 @@ from cloudinit import warnings from cloudinit import reporting from cloudinit.reporting import events -from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, - CLOUD_CONFIG) +from cloudinit.settings import PER_INSTANCE, PER_ALWAYS, PER_ONCE, CLOUD_CONFIG from cloudinit import atomic_helper @@ -44,8 +44,10 @@ from cloudinit import dhclient_hook # Welcome message template -WELCOME_MSG_TPL = ("Cloud-init v. {version} running '{action}' at " - "{timestamp}. Up {uptime} seconds.") +WELCOME_MSG_TPL = ( + "Cloud-init v. {version} running '{action}' at " + "{timestamp}. Up {uptime} seconds." +) # Module section template MOD_SECTION_TPL = "cloud_%s_modules" @@ -53,9 +55,9 @@ MOD_SECTION_TPL = "cloud_%s_modules" # Frequency shortname to full name # (so users don't have to remember the full name...) 
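A hedged aside on the FREQ_SHORT_NAMES table defined just below: `cloud-init single` accepts these short names for `--frequency`, and resolving one amounts to a plain dict lookup. This is a minimal sketch only; the PER_* strings here are assumed stand-ins for the real `cloudinit.settings` constants:

    # Hypothetical lookup against FREQ_SHORT_NAMES; PER_* values are
    # assumptions standing in for cloudinit.settings constants.
    PER_INSTANCE, PER_ALWAYS, PER_ONCE = "once-per-instance", "always", "once"
    FREQ_SHORT_NAMES = {
        "instance": PER_INSTANCE,
        "always": PER_ALWAYS,
        "once": PER_ONCE,
    }

    def resolve_frequency(short_or_full):
        # Fall back to the given value so full names pass through unchanged.
        return FREQ_SHORT_NAMES.get(short_or_full, short_or_full)

    assert resolve_frequency("instance") == PER_INSTANCE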
FREQ_SHORT_NAMES = { - 'instance': PER_INSTANCE, - 'always': PER_ALWAYS, - 'once': PER_ONCE, + "instance": PER_INSTANCE, + "always": PER_ALWAYS, + "once": PER_ONCE, } LOG = logging.getLogger() @@ -63,21 +65,20 @@ LOG = logging.getLogger() # Used for when a logger may not be active # and we still want to print exceptions... -def print_exc(msg=''): +def print_exc(msg=""): if msg: sys.stderr.write("%s\n" % (msg)) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") traceback.print_exc(file=sys.stderr) - sys.stderr.write('-' * 60) + sys.stderr.write("-" * 60) sys.stderr.write("\n") def welcome(action, msg=None): if not msg: msg = welcome_format(action) - util.multi_log("%s\n" % (msg), - console=False, stderr=True, log=LOG) + util.multi_log("%s\n" % (msg), console=False, stderr=True, log=LOG) return msg @@ -86,7 +87,8 @@ def welcome_format(action): version=version.version_string(), uptime=util.uptime(), timestamp=util.time_rfc2822(), - action=action) + action=action, + ) def extract_fns(args): @@ -107,29 +109,31 @@ def run_module_section(mods, action_name, section): (which_ran, failures) = mods.run_section(full_section_name) total_attempted = len(which_ran) + len(failures) if total_attempted == 0: - msg = ("No '%s' modules to run" - " under section '%s'") % (action_name, full_section_name) + msg = "No '%s' modules to run under section '%s'" % ( + action_name, + full_section_name, + ) sys.stderr.write("%s\n" % (msg)) LOG.debug(msg) return [] else: - LOG.debug("Ran %s modules with %s failures", - len(which_ran), len(failures)) + LOG.debug( + "Ran %s modules with %s failures", len(which_ran), len(failures) + ) return failures def apply_reporting_cfg(cfg): - if cfg.get('reporting'): - reporting.update_configuration(cfg.get('reporting')) + if cfg.get("reporting"): + reporting.update_configuration(cfg.get("reporting")) -def parse_cmdline_url(cmdline, names=('cloud-config-url', 'url')): +def parse_cmdline_url(cmdline, names=("cloud-config-url", "url")): data = util.keyval_str_to_dict(cmdline) for key in names: if key in data: return key, data[key] - raise KeyError("No keys (%s) found in string '%s'" % - (cmdline, names)) + raise KeyError("No keys (%s) found in string '%s'" % (cmdline, names)) def attempt_cmdline_url(path, network=True, cmdline=None): @@ -163,51 +167,60 @@ def attempt_cmdline_url(path, network=True, cmdline=None): if path_is_local and os.path.exists(path): if network: - m = ("file '%s' existed, possibly from local stage download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from local stage download" + " of command line url '%s'. Not re-writing." % (path, url) + ) level = logging.INFO if path_is_local: level = logging.DEBUG else: - m = ("file '%s' existed, possibly from previous boot download" - " of command line url '%s'. Not re-writing." % (path, url)) + m = ( + "file '%s' existed, possibly from previous boot download" + " of command line url '%s'. Not re-writing." 
% (path, url) + ) level = logging.WARN return (level, m) - kwargs = {'url': url, 'timeout': 10, 'retries': 2} + kwargs = {"url": url, "timeout": 10, "retries": 2} if network or path_is_local: level = logging.WARN - kwargs['sec_between'] = 1 + kwargs["sec_between"] = 1 else: level = logging.DEBUG - kwargs['sec_between'] = .1 + kwargs["sec_between"] = 0.1 data = None - header = b'#cloud-config' + header = b"#cloud-config" try: resp = url_helper.read_file_or_url(**kwargs) if resp.ok(): data = resp.contents if not resp.contents.startswith(header): - if cmdline_name == 'cloud-config-url': + if cmdline_name == "cloud-config-url": level = logging.WARN else: level = logging.INFO return ( level, - "contents of '%s' did not start with %s" % (url, header)) + "contents of '%s' did not start with %s" % (url, header), + ) else: - return (level, - "url '%s' returned code %s. Ignoring." % (url, resp.code)) + return ( + level, + "url '%s' returned code %s. Ignoring." % (url, resp.code), + ) except url_helper.UrlError as e: return (level, "retrieving url '%s' failed: %s" % (url, e)) util.write_file(path, data, mode=0o600) - return (logging.INFO, - "wrote cloud-config data from %s='%s' to %s" % - (cmdline_name, url, path)) + return ( + logging.INFO, + "wrote cloud-config data from %s='%s' to %s" + % (cmdline_name, url, path), + ) def purge_cache_on_python_version_change(init): @@ -216,31 +229,32 @@ def purge_cache_on_python_version_change(init): There could be changes not represented in our cache (obj.pkl) after we upgrade to a new version of python, so at that point clear the cache """ - current_python_version = '%d.%d' % ( - sys.version_info.major, sys.version_info.minor + current_python_version = "%d.%d" % ( + sys.version_info.major, + sys.version_info.minor, ) python_version_path = os.path.join( - init.paths.get_cpath('data'), 'python-version' + init.paths.get_cpath("data"), "python-version" ) if os.path.exists(python_version_path): cached_python_version = open(python_version_path).read() # The Python version has changed out from under us, anything that was # pickled previously is likely useless due to API changes. if cached_python_version != current_python_version: - LOG.debug('Python version change detected. Purging cache') + LOG.debug("Python version change detected. Purging cache") init.purge_cache(True) util.write_file(python_version_path, current_python_version) else: - if os.path.exists(init.paths.get_ipath_cur('obj_pkl')): + if os.path.exists(init.paths.get_ipath_cur("obj_pkl")): LOG.info( - 'Writing python-version file. ' - 'Cache compatibility status is currently unknown.' + "Writing python-version file. " + "Cache compatibility status is currently unknown." ) util.write_file(python_version_path, current_python_version) def _should_bring_up_interfaces(init, args): - if util.get_cfg_option_bool(init.cfg, 'disable_network_activation'): + if util.get_cfg_option_bool(init.cfg, "disable_network_activation"): return False return not args.local @@ -250,10 +264,14 @@ def main_init(name, args): if args.local: deps = [sources.DEP_FILESYSTEM] - early_logs = [attempt_cmdline_url( - path=os.path.join("%s.d" % CLOUD_CONFIG, - "91_kernel_cmdline_url.cfg"), - network=not args.local)] + early_logs = [ + attempt_cmdline_url( + path=os.path.join( + "%s.d" % CLOUD_CONFIG, "91_kernel_cmdline_url.cfg" + ), + network=not args.local, + ) + ] # Cloud-init 'init' stage is broken up into the following sub-stages # 1. 
Ensure that the init object fetches its config without errors @@ -289,8 +307,9 @@ def main_init(name, args): early_logs.append((logging.WARN, msg)) if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(init.cfg) apply_reporting_cfg(init.cfg) @@ -317,9 +336,11 @@ def main_init(name, args): if mode == sources.DSMODE_NETWORK: existing = "trust" sys.stderr.write("%s\n" % (netinfo.debug_info())) - LOG.debug(("Checking to see if files that we need already" - " exist from a previous run that would allow us" - " to stop early.")) + LOG.debug( + "Checking to see if files that we need already" + " exist from a previous run that would allow us" + " to stop early." + ) # no-net is written by upstart cloud-init-nonet when network failed # to come up stop_files = [ @@ -331,15 +352,18 @@ def main_init(name, args): existing_files.append(fn) if existing_files: - LOG.debug("[%s] Exiting. stop file %s existed", - mode, existing_files) + LOG.debug( + "[%s] Exiting. stop file %s existed", mode, existing_files + ) return (None, []) else: - LOG.debug("Execution continuing, no previous run detected that" - " would allow us to stop early.") + LOG.debug( + "Execution continuing, no previous run detected that" + " would allow us to stop early." + ) else: existing = "check" - mcfg = util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False) + mcfg = util.get_cfg_option_bool(init.cfg, "manual_cache_clean", False) if mcfg: LOG.debug("manual cache clean set from config") existing = "trust" @@ -360,8 +384,11 @@ def main_init(name, args): # if in network mode, and the datasource is local # then work was done at that stage. if mode == sources.DSMODE_NETWORK and init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s in local mode", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s in local mode", + mode, + init.datasource, + ) return (None, []) except sources.DataSourceNotFoundException: # In the case of 'cloud-init init' without '--local' it is a bit @@ -371,8 +398,9 @@ def main_init(name, args): if mode == sources.DSMODE_LOCAL: LOG.debug("No local datasource found") else: - util.logexc(LOG, ("No instance datasource found!" - " Likely bad things to come!")) + util.logexc( + LOG, "No instance datasource found! Likely bad things to come!" + ) if not args.force: init.apply_network_config(bring_up=bring_up_interfaces) LOG.debug("[%s] Exiting without datasource", mode) @@ -381,46 +409,60 @@ def main_init(name, args): else: return (None, ["No instance datasource found."]) else: - LOG.debug("[%s] barreling on in force mode without datasource", - mode) + LOG.debug( + "[%s] barreling on in force mode without datasource", mode + ) _maybe_persist_instance_data(init) # Stage 6 iid = init.instancify() - LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", - mode, name, iid, init.is_new_instance()) + LOG.debug( + "[%s] %s will now be targeting instance id: %s. new=%s", + mode, + name, + iid, + init.is_new_instance(), + ) if mode == sources.DSMODE_LOCAL: # Before network comes up, set any configured hostname to allow # dhcp clients to advertize this hostname to any DDNS services # LP: #1746455. 
- _maybe_set_hostname(init, stage='local', retry_stage='network') + _maybe_set_hostname(init, stage="local", retry_stage="network") init.apply_network_config(bring_up=bring_up_interfaces) if mode == sources.DSMODE_LOCAL: if init.datasource.dsmode != mode: - LOG.debug("[%s] Exiting. datasource %s not in local mode.", - mode, init.datasource) + LOG.debug( + "[%s] Exiting. datasource %s not in local mode.", + mode, + init.datasource, + ) return (init.datasource, []) else: - LOG.debug("[%s] %s is in local mode, will apply init modules now.", - mode, init.datasource) + LOG.debug( + "[%s] %s is in local mode, will apply init modules now.", + mode, + init.datasource, + ) # Give the datasource a chance to use network resources. # This is used on Azure to communicate with the fabric over network. init.setup_datasource() # update fully realizes user-data (pulling in #include if necessary) init.update() - _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config') + _maybe_set_hostname(init, stage="init-net", retry_stage="modules:config") # Stage 7 try: # Attempt to consume the data per instance. # This may run user-data handlers and/or perform # url downloads and such as needed. - (ran, _results) = init.cloudify().run('consume_data', - init.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + (ran, _results) = init.cloudify().run( + "consume_data", + init.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE, + ) if not ran: # Just consume anything that is set to run per-always # if nothing ran in the per-instance code @@ -442,8 +484,7 @@ def main_init(name, args): errfmt_orig = errfmt (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name) if outfmt_orig != outfmt or errfmt_orig != errfmt: - LOG.warning("Stdout, stderr changing to (%s, %s)", - outfmt, errfmt) + LOG.warning("Stdout, stderr changing to (%s, %s)", outfmt, errfmt) (outfmt, errfmt) = util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to re-adjust output redirection!") @@ -459,11 +500,11 @@ def main_init(name, args): def di_report_warn(datasource, cfg): - if 'di_report' not in cfg: + if "di_report" not in cfg: LOG.debug("no di_report found in config.") return - dicfg = cfg['di_report'] + dicfg = cfg["di_report"] if dicfg is None: # ds-identify may write 'di_report:\n #comment\n' # which reads as {'di_report': None} @@ -474,7 +515,7 @@ def di_report_warn(datasource, cfg): LOG.warning("di_report config not a dictionary: %s", dicfg) return - dslist = dicfg.get('datasource_list') + dslist = dicfg.get("datasource_list") if dslist is None: LOG.warning("no 'datasource_list' found in di_report.") return @@ -486,18 +527,26 @@ def di_report_warn(datasource, cfg): # where Name is the thing that shows up in datasource_list. 
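The comment above spells out the DataSource<Name> module naming convention that di_report_warn relies on. A minimal standalone sketch of that extraction, assuming DS_PREFIX is the "DataSource" constant from cloudinit.sources and using a hypothetical module path:

    # Sketch of the di_report_warn name extraction; the module path is a
    # made-up example and DS_PREFIX is assumed to mirror
    # cloudinit.sources.DS_PREFIX.
    DS_PREFIX = "DataSource"

    module_path = "cloudinit.sources.DataSourceAzure"
    modname = module_path.rpartition(".")[2]   # -> "DataSourceAzure"
    if modname.startswith(DS_PREFIX):
        modname = modname[len(DS_PREFIX):]     # -> "Azure", the datasource_list name
    assert modname == "Azure"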
modname = datasource.__module__.rpartition(".")[2] if modname.startswith(sources.DS_PREFIX): - modname = modname[len(sources.DS_PREFIX):] + modname = modname[len(sources.DS_PREFIX) :] else: - LOG.warning("Datasource '%s' came from unexpected module '%s'.", - datasource, modname) + LOG.warning( + "Datasource '%s' came from unexpected module '%s'.", + datasource, + modname, + ) if modname in dslist: - LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s", - datasource, modname, dslist) + LOG.debug( + "used datasource '%s' from '%s' was in di_report's list: %s", + datasource, + modname, + dslist, + ) return - warnings.show_warning('dsid_missing_source', cfg, - source=modname, dslist=str(dslist)) + warnings.show_warning( + "dsid_missing_source", cfg, source=modname, dslist=str(dslist) + ) def main_modules(action_name, args): @@ -521,8 +570,10 @@ def main_modules(action_name, args): init.fetch(existing="trust") except sources.DataSourceNotFoundException: # There was no datasource found, theres nothing to do - msg = ('Can not apply stage %s, no datasource found! Likely bad ' - 'things to come!' % name) + msg = ( + "Can not apply stage %s, no datasource found! Likely bad " + "things to come!" % name + ) util.logexc(LOG, msg) print_exc(msg) if not args.force: @@ -539,8 +590,9 @@ def main_modules(action_name, args): util.logexc(LOG, "Failed to setup output redirection!") if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -573,10 +625,12 @@ def main_single(name, args): # There was no datasource found, # that might be bad (or ok) depending on # the module being ran (so continue on) - util.logexc(LOG, ("Failed to fetch your datasource," - " likely bad things to come!")) - print_exc(("Failed to fetch your datasource," - " likely bad things to come!")) + util.logexc( + LOG, "Failed to fetch your datasource, likely bad things to come!" + ) + print_exc( + "Failed to fetch your datasource, likely bad things to come!" 
+ ) if not args.force: return 1 _maybe_persist_instance_data(init) @@ -598,8 +652,9 @@ def main_single(name, args): util.logexc(LOG, "Failed to setup output redirection!") if args.debug: # Reset so that all the debug handlers are closed out - LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + LOG.debug( + "Logging being reset, this logger may no longer be active shortly" + ) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -608,9 +663,7 @@ def main_single(name, args): welcome(name, msg=w_msg) # Stage 5 - (which_ran, failures) = mods.run_single(mod_name, - mod_args, - mod_freq) + (which_ran, failures) = mods.run_single(mod_name, mod_args, mod_freq) if failures: LOG.warning("Ran %s but it failed!", mod_name) return 1 @@ -633,7 +686,12 @@ def status_wrapper(name, args, data_d=None, link_d=None): result_path = os.path.join(data_d, "result.json") result_link = os.path.join(link_d, "result.json") - util.ensure_dirs((data_d, link_d,)) + util.ensure_dirs( + ( + data_d, + link_d, + ) + ) (_name, functor) = args.action @@ -647,14 +705,20 @@ def status_wrapper(name, args, data_d=None, link_d=None): else: raise ValueError("unknown name: %s" % name) - modes = ('init', 'init-local', 'modules-init', 'modules-config', - 'modules-final') + modes = ( + "init", + "init-local", + "modules-init", + "modules-config", + "modules-final", + ) if mode not in modes: raise ValueError( - "Invalid cloud init mode specified '{0}'".format(mode)) + "Invalid cloud init mode specified '{0}'".format(mode) + ) status = None - if mode == 'init-local': + if mode == "init-local": for f in (status_link, result_link, status_path, result_path): util.del_file(f) else: @@ -664,45 +728,46 @@ def status_wrapper(name, args, data_d=None, link_d=None): pass nullstatus = { - 'errors': [], - 'start': None, - 'finished': None, + "errors": [], + "start": None, + "finished": None, } if status is None: - status = {'v1': {}} - status['v1']['datasource'] = None + status = {"v1": {}} + status["v1"]["datasource"] = None for m in modes: - if m not in status['v1']: - status['v1'][m] = nullstatus.copy() + if m not in status["v1"]: + status["v1"][m] = nullstatus.copy() - v1 = status['v1'] - v1['stage'] = mode - v1[mode]['start'] = time.time() + v1 = status["v1"] + v1["stage"] = mode + v1[mode]["start"] = time.time() atomic_helper.write_json(status_path, status) - util.sym_link(os.path.relpath(status_path, link_d), status_link, - force=True) + util.sym_link( + os.path.relpath(status_path, link_d), status_link, force=True + ) try: ret = functor(name, args) - if mode in ('init', 'init-local'): + if mode in ("init", "init-local"): (datasource, errors) = ret if datasource is not None: - v1['datasource'] = str(datasource) + v1["datasource"] = str(datasource) else: errors = ret - v1[mode]['errors'] = [str(e) for e in errors] + v1[mode]["errors"] = [str(e) for e in errors] except Exception as e: util.logexc(LOG, "failed stage %s", mode) print_exc("failed run of stage %s" % mode) - v1[mode]['errors'] = [str(e)] + v1[mode]["errors"] = [str(e)] - v1[mode]['finished'] = time.time() - v1['stage'] = None + v1[mode]["finished"] = time.time() + v1["stage"] = None atomic_helper.write_json(status_path, status) @@ -710,23 +775,26 @@ def status_wrapper(name, args, data_d=None, link_d=None): # write the 'finished' file errors = [] for m in modes: - if v1[m]['errors']: - errors.extend(v1[m].get('errors', [])) + if v1[m]["errors"]: + errors.extend(v1[m].get("errors", [])) atomic_helper.write_json( - 
result_path, {'v1': {'datasource': v1['datasource'], - 'errors': errors}}) - util.sym_link(os.path.relpath(result_path, link_d), result_link, - force=True) + result_path, + {"v1": {"datasource": v1["datasource"], "errors": errors}}, + ) + util.sym_link( + os.path.relpath(result_path, link_d), result_link, force=True + ) - return len(v1[mode]['errors']) + return len(v1[mode]["errors"]) def _maybe_persist_instance_data(init): """Write instance-data.json file if absent and datasource is restored.""" if init.ds_restored: instance_data_file = os.path.join( - init.paths.run_dir, sources.INSTANCE_JSON_FILE) + init.paths.run_dir, sources.INSTANCE_JSON_FILE + ) if not os.path.exists(instance_data_file): init.datasource.persist_instance_data() @@ -739,18 +807,23 @@ def _maybe_set_hostname(init, stage, retry_stage): """ cloud = init.cloudify() (hostname, _fqdn) = util.get_hostname_fqdn( - init.cfg, cloud, metadata_only=True) + init.cfg, cloud, metadata_only=True + ) if hostname: # meta-data or user-data hostname content try: - cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) + cc_set_hostname.handle("set-hostname", init.cfg, cloud, LOG, None) except cc_set_hostname.SetHostnameError as e: LOG.debug( - 'Failed setting hostname in %s stage. Will' - ' retry in %s stage. Error: %s.', stage, retry_stage, str(e)) + "Failed setting hostname in %s stage. Will" + " retry in %s stage. Error: %s.", + stage, + retry_stage, + str(e), + ) def main_features(name, args): - sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') + sys.stdout.write("\n".join(sorted(version.FEATURES)) + "\n") def main(sysv_args=None): @@ -760,129 +833,182 @@ def main(sysv_args=None): sysv_args = sysv_args[1:] # Top level args - parser.add_argument('--version', '-v', action='version', - version='%(prog)s ' + (version.version_string())) - parser.add_argument('--file', '-f', action='append', - dest='files', - help=('additional yaml configuration' - ' files to use'), - type=argparse.FileType('rb')) - parser.add_argument('--debug', '-d', action='store_true', - help=('show additional pre-action' - ' logging (default: %(default)s)'), - default=False) - parser.add_argument('--force', action='store_true', - help=('force running even if no datasource is' - ' found (use at your own risk)'), - dest='force', - default=False) + parser.add_argument( + "--version", + "-v", + action="version", + version="%(prog)s " + (version.version_string()), + ) + parser.add_argument( + "--file", + "-f", + action="append", + dest="files", + help="additional yaml configuration files to use", + type=argparse.FileType("rb"), + ) + parser.add_argument( + "--debug", + "-d", + action="store_true", + help="show additional pre-action logging (default: %(default)s)", + default=False, + ) + parser.add_argument( + "--force", + action="store_true", + help=( + "force running even if no datasource is" + " found (use at your own risk)" + ), + dest="force", + default=False, + ) parser.set_defaults(reporter=None) - subparsers = parser.add_subparsers(title='Subcommands', dest='subcommand') + subparsers = parser.add_subparsers(title="Subcommands", dest="subcommand") subparsers.required = True # Each action and its sub-options (if any) - parser_init = subparsers.add_parser('init', - help=('initializes cloud-init and' - ' performs initial modules')) - parser_init.add_argument("--local", '-l', action='store_true', - help="start in local mode (default: %(default)s)", - default=False) + parser_init = subparsers.add_parser( + "init", help="initializes cloud-init and 
performs initial modules" + ) + parser_init.add_argument( + "--local", + "-l", + action="store_true", + help="start in local mode (default: %(default)s)", + default=False, + ) # This is used so that we can know which action is selected + # the functor to use to run this subcommand - parser_init.set_defaults(action=('init', main_init)) + parser_init.set_defaults(action=("init", main_init)) # These settings are used for the 'config' and 'final' stages - parser_mod = subparsers.add_parser('modules', - help=('activates modules using ' - 'a given configuration key')) - parser_mod.add_argument("--mode", '-m', action='store', - help=("module configuration name " - "to use (default: %(default)s)"), - default='config', - choices=('init', 'config', 'final')) - parser_mod.set_defaults(action=('modules', main_modules)) + parser_mod = subparsers.add_parser( + "modules", help="activates modules using a given configuration key" + ) + parser_mod.add_argument( + "--mode", + "-m", + action="store", + help="module configuration name to use (default: %(default)s)", + default="config", + choices=("init", "config", "final"), + ) + parser_mod.set_defaults(action=("modules", main_modules)) # This subcommand allows you to run a single module - parser_single = subparsers.add_parser('single', - help=('run a single module ')) - parser_single.add_argument("--name", '-n', action="store", - help="module name to run", - required=True) - parser_single.add_argument("--frequency", action="store", - help=("frequency of the module"), - required=False, - choices=list(FREQ_SHORT_NAMES.keys())) - parser_single.add_argument("--report", action="store_true", - help="enable reporting", - required=False) - parser_single.add_argument("module_args", nargs="*", - metavar='argument', - help=('any additional arguments to' - ' pass to this module')) - parser_single.set_defaults(action=('single', main_single)) + parser_single = subparsers.add_parser( + "single", help="run a single module " + ) + parser_single.add_argument( + "--name", + "-n", + action="store", + help="module name to run", + required=True, + ) + parser_single.add_argument( + "--frequency", + action="store", + help="frequency of the module", + required=False, + choices=list(FREQ_SHORT_NAMES.keys()), + ) + parser_single.add_argument( + "--report", + action="store_true", + help="enable reporting", + required=False, + ) + parser_single.add_argument( + "module_args", + nargs="*", + metavar="argument", + help="any additional arguments to pass to this module", + ) + parser_single.set_defaults(action=("single", main_single)) parser_query = subparsers.add_parser( - 'query', - help='Query standardized instance metadata from the command line.') + "query", + help="Query standardized instance metadata from the command line.", + ) parser_dhclient = subparsers.add_parser( - dhclient_hook.NAME, help=dhclient_hook.__doc__) + dhclient_hook.NAME, help=dhclient_hook.__doc__ + ) dhclient_hook.get_parser(parser_dhclient) - parser_features = subparsers.add_parser('features', - help=('list defined features')) - parser_features.set_defaults(action=('features', main_features)) + parser_features = subparsers.add_parser( + "features", help="list defined features" + ) + parser_features.set_defaults(action=("features", main_features)) parser_analyze = subparsers.add_parser( - 'analyze', help='Devel tool: Analyze cloud-init logs and data') + "analyze", help="Devel tool: Analyze cloud-init logs and data" + ) - parser_devel = subparsers.add_parser( - 'devel', help='Run development tools') + parser_devel = 
subparsers.add_parser("devel", help="Run development tools") parser_collect_logs = subparsers.add_parser( - 'collect-logs', help='Collect and tar all cloud-init debug info') + "collect-logs", help="Collect and tar all cloud-init debug info" + ) parser_clean = subparsers.add_parser( - 'clean', help='Remove logs and artifacts so cloud-init can re-run.') + "clean", help="Remove logs and artifacts so cloud-init can re-run." + ) parser_status = subparsers.add_parser( - 'status', help='Report cloud-init status or wait on completion.') + "status", help="Report cloud-init status or wait on completion." + ) if sysv_args: # Only load subparsers if subcommand is specified to avoid load cost - if sysv_args[0] == 'analyze': + if sysv_args[0] == "analyze": from cloudinit.analyze.__main__ import get_parser as analyze_parser + # Construct analyze subcommand parser analyze_parser(parser_analyze) - elif sysv_args[0] == 'devel': + elif sysv_args[0] == "devel": from cloudinit.cmd.devel.parser import get_parser as devel_parser + # Construct devel subcommand parser devel_parser(parser_devel) - elif sysv_args[0] == 'collect-logs': + elif sysv_args[0] == "collect-logs": from cloudinit.cmd.devel.logs import ( - get_parser as logs_parser, handle_collect_logs_args) + get_parser as logs_parser, + handle_collect_logs_args, + ) + logs_parser(parser_collect_logs) parser_collect_logs.set_defaults( - action=('collect-logs', handle_collect_logs_args)) - elif sysv_args[0] == 'clean': + action=("collect-logs", handle_collect_logs_args) + ) + elif sysv_args[0] == "clean": from cloudinit.cmd.clean import ( - get_parser as clean_parser, handle_clean_args) + get_parser as clean_parser, + handle_clean_args, + ) + clean_parser(parser_clean) - parser_clean.set_defaults( - action=('clean', handle_clean_args)) - elif sysv_args[0] == 'query': + parser_clean.set_defaults(action=("clean", handle_clean_args)) + elif sysv_args[0] == "query": from cloudinit.cmd.query import ( - get_parser as query_parser, handle_args as handle_query_args) + get_parser as query_parser, + handle_args as handle_query_args, + ) + query_parser(parser_query) - parser_query.set_defaults( - action=('render', handle_query_args)) - elif sysv_args[0] == 'status': + parser_query.set_defaults(action=("render", handle_query_args)) + elif sysv_args[0] == "status": from cloudinit.cmd.status import ( - get_parser as status_parser, handle_status_args) + get_parser as status_parser, + handle_status_args, + ) + status_parser(parser_status) - parser_status.set_defaults( - action=('status', handle_status_args)) + parser_status.set_defaults(action=("status", handle_status_args)) args = parser.parse_args(args=sysv_args) @@ -906,14 +1032,20 @@ def main(sysv_args=None): if args.local: rname, rdesc = ("init-local", "searching for local datasources") else: - rname, rdesc = ("init-network", - "searching for network datasources") + rname, rdesc = ( + "init-network", + "searching for network datasources", + ) elif name == "modules": - rname, rdesc = ("modules-%s" % args.mode, - "running modules for %s" % args.mode) + rname, rdesc = ( + "modules-%s" % args.mode, + "running modules for %s" % args.mode, + ) elif name == "single": - rname, rdesc = ("single/%s" % args.name, - "running single module %s" % args.name) + rname, rdesc = ( + "single/%s" % args.name, + "running single module %s" % args.name, + ) report_on = args.report else: rname = name @@ -921,19 +1053,24 @@ def main(sysv_args=None): report_on = False args.reporter = events.ReportEventStack( - rname, rdesc, 
reporting_enabled=report_on) + rname, rdesc, reporting_enabled=report_on + ) with args.reporter: retval = util.log_time( - logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, - get_uptime=True, func=functor, args=(name, args)) + logfunc=LOG.debug, + msg="cloud-init mode '%s'" % name, + get_uptime=True, + func=functor, + args=(name, args), + ) reporting.flush_events() return retval -if __name__ == '__main__': - if 'TZ' not in os.environ: - os.environ['TZ'] = ":/etc/localtime" +if __name__ == "__main__": + if "TZ" not in os.environ: + os.environ["TZ"] = ":/etc/localtime" return_value = main(sys.argv) if return_value: sys.exit(return_value) diff --git a/cloudinit/cmd/query.py b/cloudinit/cmd/query.py index e53cd855..46f17699 100644 --- a/cloudinit/cmd/query.py +++ b/cloudinit/cmd/query.py @@ -14,22 +14,24 @@ output; if this fails, they are treated as binary. """ import argparse -from errno import EACCES import os import sys +from errno import EACCES +from cloudinit import log, util +from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths from cloudinit.handlers.jinja_template import ( convert_jinja_instance_data, get_jinja_variable_alias, - render_jinja_payload + render_jinja_payload, ) -from cloudinit.cmd.devel import addLogHandlerCLI, read_cfg_paths -from cloudinit import log from cloudinit.sources import ( - INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, REDACT_SENSITIVE_VALUE) -from cloudinit import util + INSTANCE_JSON_FILE, + INSTANCE_JSON_SENSITIVE_FILE, + REDACT_SENSITIVE_VALUE, +) -NAME = 'query' +NAME = "query" LOG = log.getLogger(NAME) @@ -43,41 +45,79 @@ def get_parser(parser=None): @returns: ArgumentParser with proper argument configuration. """ if not parser: - parser = argparse.ArgumentParser( - prog=NAME, description=__doc__) + parser = argparse.ArgumentParser(prog=NAME, description=__doc__) parser.add_argument( - '-d', '--debug', action='store_true', default=False, - help='Add verbose messages during template render') + "-d", + "--debug", + action="store_true", + default=False, + help="Add verbose messages during template render", + ) parser.add_argument( - '-i', '--instance-data', type=str, - help=('Path to instance-data.json file. Default is /run/cloud-init/%s' - % INSTANCE_JSON_FILE)) + "-i", + "--instance-data", + type=str, + help="Path to instance-data.json file. Default is /run/cloud-init/%s" + % INSTANCE_JSON_FILE, + ) parser.add_argument( - '-l', '--list-keys', action='store_true', default=False, - help=('List query keys available at the provided instance-data' - ' .')) + "-l", + "--list-keys", + action="store_true", + default=False, + help=( + "List query keys available at the provided instance-data" + " ." + ), + ) parser.add_argument( - '-u', '--user-data', type=str, - help=('Path to user-data file. Default is' - ' /var/lib/cloud/instance/user-data.txt')) + "-u", + "--user-data", + type=str, + help=( + "Path to user-data file. Default is" + " /var/lib/cloud/instance/user-data.txt" + ), + ) parser.add_argument( - '-v', '--vendor-data', type=str, - help=('Path to vendor-data file. Default is' - ' /var/lib/cloud/instance/vendor-data.txt')) + "-v", + "--vendor-data", + type=str, + help=( + "Path to vendor-data file. Default is" + " /var/lib/cloud/instance/vendor-data.txt" + ), + ) parser.add_argument( - 'varname', type=str, nargs='?', - help=('A dot-delimited specific variable to query from' - ' instance-data. For example: v1.local_hostname. If the' - ' value is not JSON serializable, it will be base64-encoded and' - ' will contain the prefix "ci-b64:". 
')) + "varname", + type=str, + nargs="?", + help=( + "A dot-delimited specific variable to query from" + " instance-data. For example: v1.local_hostname. If the" + " value is not JSON serializable, it will be base64-encoded and" + ' will contain the prefix "ci-b64:". ' + ), + ) parser.add_argument( - '-a', '--all', action='store_true', default=False, dest='dump_all', - help='Dump all available instance-data') + "-a", + "--all", + action="store_true", + default=False, + dest="dump_all", + help="Dump all available instance-data", + ) parser.add_argument( - '-f', '--format', type=str, dest='format', - help=('Optionally specify a custom output format string. Any' - ' instance-data variable can be specified between double-curly' - ' braces. For example -f "{{ v2.cloud_name }}"')) + "-f", + "--format", + type=str, + dest="format", + help=( + "Optionally specify a custom output format string. Any" + " instance-data variable can be specified between double-curly" + ' braces. For example -f "{{ v2.cloud_name }}"' + ), + ) return parser @@ -91,7 +131,7 @@ def load_userdata(ud_file_path): """ bdata = util.load_file(ud_file_path, decode=False) try: - return bdata.decode('utf-8') + return bdata.decode("utf-8") except UnicodeDecodeError: return util.decomp_gzip(bdata, quiet=False, decode=True) @@ -118,13 +158,15 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: redacted_data_fn = os.path.join(paths.run_dir, INSTANCE_JSON_FILE) if uid == 0: sensitive_data_fn = os.path.join( - paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE) + paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE + ) if os.path.exists(sensitive_data_fn): instance_data_fn = sensitive_data_fn else: LOG.warning( - 'Missing root-readable %s. Using redacted %s instead.', - sensitive_data_fn, redacted_data_fn + "Missing root-readable %s. Using redacted %s instead.", + sensitive_data_fn, + redacted_data_fn, ) instance_data_fn = redacted_data_fn else: @@ -132,11 +174,11 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if user_data: user_data_fn = user_data else: - user_data_fn = os.path.join(paths.instance_link, 'user-data.txt') + user_data_fn = os.path.join(paths.instance_link, "user-data.txt") if vendor_data: vendor_data_fn = vendor_data else: - vendor_data_fn = os.path.join(paths.instance_link, 'vendor-data.txt') + vendor_data_fn = os.path.join(paths.instance_link, "vendor-data.txt") try: instance_json = util.load_file(instance_data_fn) @@ -144,24 +186,30 @@ def _read_instance_data(instance_data, user_data, vendor_data) -> dict: if e.errno == EACCES: LOG.error("No read permission on '%s'. 
Try sudo", instance_data_fn) else: - LOG.error('Missing instance-data file: %s', instance_data_fn) + LOG.error("Missing instance-data file: %s", instance_data_fn) raise instance_data = util.load_json(instance_json) if uid != 0: - instance_data['userdata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, user_data_fn)) - instance_data['vendordata'] = ( - '<%s> file:%s' % (REDACT_SENSITIVE_VALUE, vendor_data_fn)) + instance_data["userdata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + user_data_fn, + ) + instance_data["vendordata"] = "<%s> file:%s" % ( + REDACT_SENSITIVE_VALUE, + vendor_data_fn, + ) else: - instance_data['userdata'] = load_userdata(user_data_fn) - instance_data['vendordata'] = load_userdata(vendor_data_fn) + instance_data["userdata"] = load_userdata(user_data_fn) + instance_data["vendordata"] = load_userdata(vendor_data_fn) return instance_data def _find_instance_data_leaf_by_varname_path( - jinja_vars_without_aliases: dict, jinja_vars_with_aliases: dict, - varname: str, list_keys: bool + jinja_vars_without_aliases: dict, + jinja_vars_with_aliases: dict, + varname: str, + list_keys: bool, ): """Return the value of the dot-delimited varname path in instance-data @@ -174,7 +222,7 @@ def _find_instance_data_leaf_by_varname_path( """ walked_key_path = "" response = jinja_vars_without_aliases - for key_path_part in varname.split('.'): + for key_path_part in varname.split("."): try: # Walk key path using complete aliases dict, yet response # should only contain jinja_without_aliases @@ -205,8 +253,9 @@ def handle_args(name, args): addLogHandlerCLI(LOG, log.DEBUG if args.debug else log.WARNING) if not any([args.list_keys, args.varname, args.format, args.dump_all]): LOG.error( - 'Expected one of the options: --all, --format,' - ' --list-keys or varname') + "Expected one of the options: --all, --format," + " --list-keys or varname" + ) get_parser().print_help() return 1 try: @@ -216,11 +265,13 @@ def handle_args(name, args): except (IOError, OSError): return 1 if args.format: - payload = '## template: jinja\n{fmt}'.format(fmt=args.format) + payload = "## template: jinja\n{fmt}".format(fmt=args.format) rendered_payload = render_jinja_payload( - payload=payload, payload_fn='query commandline', + payload=payload, + payload_fn="query commandline", instance_data=instance_data, - debug=True if args.debug else False) + debug=True if args.debug else False, + ) if rendered_payload: print(rendered_payload) return 0 @@ -240,7 +291,7 @@ def handle_args(name, args): jinja_vars_without_aliases=response, jinja_vars_with_aliases=jinja_vars_with_aliases, varname=args.varname, - list_keys=args.list_keys + list_keys=args.list_keys, ) except (KeyError, ValueError) as e: LOG.error(e) @@ -248,11 +299,10 @@ def handle_args(name, args): if args.list_keys: if not isinstance(response, dict): LOG.error( - "--list-keys provided but '%s' is not a dict", - args.varname + "--list-keys provided but '%s' is not a dict", args.varname ) return 1 - response = '\n'.join(sorted(response.keys())) + response = "\n".join(sorted(response.keys())) if not isinstance(response, str): response = util.json_dumps(response) print(response) @@ -265,7 +315,7 @@ def main(): sys.exit(handle_args(NAME, parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/cmd/status.py b/cloudinit/cmd/status.py index ea79a85b..cff16c34 100644 --- a/cloudinit/cmd/status.py +++ b/cloudinit/cmd/status.py @@ -7,20 +7,20 @@ import argparse import os import sys -from time import 
gmtime, strftime, sleep +from time import gmtime, sleep, strftime from cloudinit.distros import uses_systemd from cloudinit.stages import Init from cloudinit.util import get_cmdline, load_file, load_json -CLOUDINIT_DISABLED_FILE = '/etc/cloud/cloud-init.disabled' +CLOUDINIT_DISABLED_FILE = "/etc/cloud/cloud-init.disabled" # customer visible status messages -STATUS_ENABLED_NOT_RUN = 'not run' -STATUS_RUNNING = 'running' -STATUS_DONE = 'done' -STATUS_ERROR = 'error' -STATUS_DISABLED = 'disabled' +STATUS_ENABLED_NOT_RUN = "not run" +STATUS_RUNNING = "running" +STATUS_DONE = "done" +STATUS_ERROR = "error" +STATUS_DISABLED = "disabled" def get_parser(parser=None): @@ -34,15 +34,25 @@ def get_parser(parser=None): """ if not parser: parser = argparse.ArgumentParser( - prog='status', - description='Report run status of cloud init') + prog="status", description="Report run status of cloud init" + ) parser.add_argument( - '-l', '--long', action='store_true', default=False, - help=('Report long format of statuses including run stage name and' - ' error messages')) + "-l", + "--long", + action="store_true", + default=False, + help=( + "Report long format of statuses including run stage name and" + " error messages" + ), + ) parser.add_argument( - '-w', '--wait', action='store_true', default=False, - help='Block waiting on cloud-init to complete') + "-w", + "--wait", + action="store_true", + default=False, + help="Block waiting on cloud-init to complete", + ) return parser @@ -55,18 +65,18 @@ def handle_status_args(name, args): status, status_detail, time = _get_status_details(init.paths) if args.wait: while status in (STATUS_ENABLED_NOT_RUN, STATUS_RUNNING): - sys.stdout.write('.') + sys.stdout.write(".") sys.stdout.flush() status, status_detail, time = _get_status_details(init.paths) sleep(0.25) - sys.stdout.write('\n') + sys.stdout.write("\n") if args.long: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) if time: - print('time: {0}'.format(time)) - print('detail:\n{0}'.format(status_detail)) + print("time: {0}".format(time)) + print("detail:\n{0}".format(status_detail)) else: - print('status: {0}'.format(status)) + print("status: {0}".format(status)) return 1 if status == STATUS_ERROR else 0 @@ -81,20 +91,20 @@ def _is_cloudinit_disabled(disable_file, paths): is_disabled = False cmdline_parts = get_cmdline().split() if not uses_systemd(): - reason = 'Cloud-init enabled on sysvinit' - elif 'cloud-init=enabled' in cmdline_parts: - reason = 'Cloud-init enabled by kernel command line cloud-init=enabled' + reason = "Cloud-init enabled on sysvinit" + elif "cloud-init=enabled" in cmdline_parts: + reason = "Cloud-init enabled by kernel command line cloud-init=enabled" elif os.path.exists(disable_file): is_disabled = True - reason = 'Cloud-init disabled by {0}'.format(disable_file) - elif 'cloud-init=disabled' in cmdline_parts: + reason = "Cloud-init disabled by {0}".format(disable_file) + elif "cloud-init=disabled" in cmdline_parts: is_disabled = True - reason = 'Cloud-init disabled by kernel parameter cloud-init=disabled' - elif not os.path.exists(os.path.join(paths.run_dir, 'enabled')): + reason = "Cloud-init disabled by kernel parameter cloud-init=disabled" + elif not os.path.exists(os.path.join(paths.run_dir, "enabled")): is_disabled = True - reason = 'Cloud-init disabled by cloud-init-generator' + reason = "Cloud-init disabled by cloud-init-generator" else: - reason = 'Cloud-init enabled by systemd cloud-init-generator' + reason = "Cloud-init enabled by systemd 
cloud-init-generator" return (is_disabled, reason) @@ -106,34 +116,35 @@ def _get_status_details(paths): Values are obtained from parsing paths.run_dir/status.json. """ status = STATUS_ENABLED_NOT_RUN - status_detail = '' + status_detail = "" status_v1 = {} - status_file = os.path.join(paths.run_dir, 'status.json') - result_file = os.path.join(paths.run_dir, 'result.json') + status_file = os.path.join(paths.run_dir, "status.json") + result_file = os.path.join(paths.run_dir, "result.json") (is_disabled, reason) = _is_cloudinit_disabled( - CLOUDINIT_DISABLED_FILE, paths) + CLOUDINIT_DISABLED_FILE, paths + ) if is_disabled: status = STATUS_DISABLED status_detail = reason if os.path.exists(status_file): if not os.path.exists(result_file): status = STATUS_RUNNING - status_v1 = load_json(load_file(status_file)).get('v1', {}) + status_v1 = load_json(load_file(status_file)).get("v1", {}) errors = [] latest_event = 0 for key, value in sorted(status_v1.items()): - if key == 'stage': + if key == "stage": if value: status = STATUS_RUNNING - status_detail = 'Running in stage: {0}'.format(value) - elif key == 'datasource': + status_detail = "Running in stage: {0}".format(value) + elif key == "datasource": status_detail = value elif isinstance(value, dict): - errors.extend(value.get('errors', [])) - start = value.get('start') or 0 - finished = value.get('finished') or 0 + errors.extend(value.get("errors", [])) + start = value.get("start") or 0 + finished = value.get("finished") or 0 if finished == 0 and start != 0: status = STATUS_RUNNING event_time = max(start, finished) @@ -141,23 +152,23 @@ def _get_status_details(paths): latest_event = event_time if errors: status = STATUS_ERROR - status_detail = '\n'.join(errors) + status_detail = "\n".join(errors) elif status == STATUS_ENABLED_NOT_RUN and latest_event > 0: status = STATUS_DONE if latest_event: - time = strftime('%a, %d %b %Y %H:%M:%S %z', gmtime(latest_event)) + time = strftime("%a, %d %b %Y %H:%M:%S %z", gmtime(latest_event)) else: - time = '' + time = "" return status, status_detail, time def main(): """Tool to report status of cloud-init.""" parser = get_parser() - sys.exit(handle_status_args('status', parser.parse_args())) + sys.exit(handle_status_args("status", parser.parse_args())) -if __name__ == '__main__': +if __name__ == "__main__": main() # vi: ts=4 expandtab diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py index 0ef9a748..ed124180 100644 --- a/cloudinit/config/__init__.py +++ b/cloudinit/config/__init__.py @@ -6,9 +6,8 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
-from cloudinit.settings import (PER_INSTANCE, FREQUENCIES) - from cloudinit import log as logging +from cloudinit.settings import FREQUENCIES, PER_INSTANCE LOG = logging.getLogger(__name__) @@ -22,26 +21,27 @@ MOD_PREFIX = "cc_" def form_module_name(name): canon_name = name.replace("-", "_") if canon_name.lower().endswith(".py"): - canon_name = canon_name[0:(len(canon_name) - 3)] + canon_name = canon_name[0 : (len(canon_name) - 3)] canon_name = canon_name.strip() if not canon_name: return None if not canon_name.startswith(MOD_PREFIX): - canon_name = '%s%s' % (MOD_PREFIX, canon_name) + canon_name = "%s%s" % (MOD_PREFIX, canon_name) return canon_name def fixup_module(mod, def_freq=PER_INSTANCE): - if not hasattr(mod, 'frequency'): - setattr(mod, 'frequency', def_freq) + if not hasattr(mod, "frequency"): + setattr(mod, "frequency", def_freq) else: freq = mod.frequency if freq and freq not in FREQUENCIES: LOG.warning("Module %s has an unknown frequency %s", mod, freq) - if not hasattr(mod, 'distros'): - setattr(mod, 'distros', []) - if not hasattr(mod, 'osfamilies'): - setattr(mod, 'osfamilies', []) + if not hasattr(mod, "distros"): + setattr(mod, "distros", []) + if not hasattr(mod, "osfamilies"): + setattr(mod, "osfamilies", []) return mod + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apk_configure.py b/cloudinit/config/cc_apk_configure.py index d227a58d..a615c814 100644 --- a/cloudinit/config/cc_apk_configure.py +++ b/cloudinit/config/cc_apk_configure.py @@ -9,9 +9,7 @@ from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import util +from cloudinit import temp_utils, templater, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE @@ -54,34 +52,41 @@ REPOSITORIES_TEMPLATE = """\ frequency = PER_INSTANCE -distros = ['alpine'] +distros = ["alpine"] meta = { - 'id': 'cc_apk_configure', - 'name': 'APK Configure', - 'title': 'Configure apk repositories file', - 'description': dedent("""\ + "id": "cc_apk_configure", + "name": "APK Configure", + "title": "Configure apk repositories file", + "description": dedent( + """\ This module handles configuration of the /etc/apk/repositories file. .. note:: To ensure that apk configuration is valid yaml, any strings containing special characters, especially ``:`` should be quoted. - """), - 'distros': distros, - 'examples': [ - dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ # Keep the existing /etc/apk/repositories file unaltered. apk_repos: preserve_repositories: true - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine v3.12 main and community # using default mirror site. apk_repos: alpine_repo: community_enabled: true version: 'v3.12' - """), - dedent("""\ + """ + ), + dedent( + """\ # Create repositories file for Alpine Edge main, community, and # testing using a specified mirror site and also a local repo. 
apk_repos: @@ -91,21 +96,23 @@ meta = { testing_enabled: true version: 'edge' local_repo_base_url: 'https://my-local-server/local-alpine' - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apk_repos': { - 'type': 'object', - 'properties': { - 'preserve_repositories': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apk_repos": { + "type": "object", + "properties": { + "preserve_repositories": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new repositories file ``/etc/apk/repositories`` based on any valid configuration settings specified within a apk_repos @@ -116,33 +123,41 @@ schema = { The ``preserve_repositories`` option overrides all other config keys that would alter ``/etc/apk/repositories``. - """) + """ + ), }, - 'alpine_repo': { - 'type': ['object', 'null'], - 'properties': { - 'base_url': { - 'type': 'string', - 'default': DEFAULT_MIRROR, - 'description': dedent("""\ + "alpine_repo": { + "type": ["object", "null"], + "properties": { + "base_url": { + "type": "string", + "default": DEFAULT_MIRROR, + "description": dedent( + """\ The base URL of an Alpine repository, or mirror, to download official packages from. If not specified then it defaults to ``{}`` - """.format(DEFAULT_MIRROR)) + """.format( + DEFAULT_MIRROR + ) + ), }, - 'community_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "community_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Community repo to the repositories file. By default the Community repo is not included. - """) + """ + ), }, - 'testing_enabled': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "testing_enabled": { + "type": "boolean", + "default": False, + "description": dedent( + """\ Whether to add the Testing repo to the repositories file. By default the Testing repo is not included. It is only recommended @@ -151,32 +166,37 @@ schema = { installed from Testing may have dependancies that conflict with those in non-Edge Main or Community repos." - """) + """ + ), }, - 'version': { - 'type': 'string', - 'description': dedent("""\ + "version": { + "type": "string", + "description": dedent( + """\ The Alpine version to use (e.g. ``v3.12`` or ``edge``) - """) + """ + ), }, }, - 'required': ['version'], - 'minProperties': 1, - 'additionalProperties': False, + "required": ["version"], + "minProperties": 1, + "additionalProperties": False, }, - 'local_repo_base_url': { - 'type': 'string', - 'description': dedent("""\ + "local_repo_base_url": { + "type": "string", + "description": dedent( + """\ The base URL of an Alpine repository containing unofficial packages - """) - } + """ + ), + }, }, - 'minProperties': 1, # Either preserve_repositories or alpine_repo - 'additionalProperties': False, + "minProperties": 1, # Either preserve_repositories or alpine_repo + "additionalProperties": False, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -195,38 +215,44 @@ def handle(name, cfg, cloud, log, _args): # If there is no "apk_repos" section in the configuration # then do nothing. 
- apk_section = cfg.get('apk_repos') + apk_section = cfg.get("apk_repos") if not apk_section: - LOG.debug(("Skipping module named %s," - " no 'apk_repos' section found"), name) + LOG.debug( + "Skipping module named %s, no 'apk_repos' section found", name + ) return validate_cloudconfig_schema(cfg, schema) # If "preserve_repositories" is explicitly set to True in # the configuration do nothing. - if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False): - LOG.debug(("Skipping module named %s," - " 'preserve_repositories' is set"), name) + if util.get_cfg_option_bool(apk_section, "preserve_repositories", False): + LOG.debug( + "Skipping module named %s, 'preserve_repositories' is set", name + ) return # If there is no "alpine_repo" subsection of "apk_repos" present in the # configuration then do nothing, as at least "version" is required to # create valid repositories entries. - alpine_repo = apk_section.get('alpine_repo') + alpine_repo = apk_section.get("alpine_repo") if not alpine_repo: - LOG.debug(("Skipping module named %s," - " no 'alpine_repo' configuration found"), name) + LOG.debug( + "Skipping module named %s, no 'alpine_repo' configuration found", + name, + ) return # If there is no "version" value present in configuration then do nothing. - alpine_version = alpine_repo.get('version') + alpine_version = alpine_repo.get("version") if not alpine_version: - LOG.debug(("Skipping module named %s," - " 'version' not specified in alpine_repo"), name) + LOG.debug( + "Skipping module named %s, 'version' not specified in alpine_repo", + name, + ) return - local_repo = apk_section.get('local_repo_base_url', '') + local_repo = apk_section.get("local_repo_base_url", "") _write_repositories_file(alpine_repo, alpine_version, local_repo) @@ -240,22 +266,23 @@ def _write_repositories_file(alpine_repo, alpine_version, local_repo): @param local_repo: A string containing the base URL of a local repo. 
""" - repo_file = '/etc/apk/repositories' + repo_file = "/etc/apk/repositories" - alpine_baseurl = alpine_repo.get('base_url', DEFAULT_MIRROR) + alpine_baseurl = alpine_repo.get("base_url", DEFAULT_MIRROR) - params = {'alpine_baseurl': alpine_baseurl, - 'alpine_version': alpine_version, - 'community_enabled': alpine_repo.get('community_enabled'), - 'testing_enabled': alpine_repo.get('testing_enabled'), - 'local_repo': local_repo} + params = { + "alpine_baseurl": alpine_baseurl, + "alpine_version": alpine_version, + "community_enabled": alpine_repo.get("community_enabled"), + "testing_enabled": alpine_repo.get("testing_enabled"), + "local_repo": local_repo, + } - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # Filepath is second item in tuple util.write_file(template_fn, content=REPOSITORIES_TEMPLATE) - LOG.debug('Generating Alpine repository configuration file: %s', - repo_file) + LOG.debug("Generating Alpine repository configuration file: %s", repo_file) templater.render_to_file(template_fn, repo_file, params) # Clean up temporary template util.del_file(template_fn) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 2e844c2c..b0728517 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -10,16 +10,14 @@ import glob import os -import re import pathlib +import re from textwrap import dedent -from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit import gpg from cloudinit import log as logging -from cloudinit import subp -from cloudinit import templater -from cloudinit import util +from cloudinit import subp, templater, util +from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -27,59 +25,46 @@ LOG = logging.getLogger(__name__) # this will match 'XXX:YYY' (ie, 'cloud-archive:foo' or 'ppa:bar') ADD_APT_REPO_MATCH = r"^[\w-]+:\w" -APT_LOCAL_KEYS = '/etc/apt/trusted.gpg' -APT_TRUSTED_GPG_DIR = '/etc/apt/trusted.gpg.d/' -CLOUD_INIT_GPG_DIR = '/etc/apt/cloud-init.gpg.d/' +APT_LOCAL_KEYS = "/etc/apt/trusted.gpg" +APT_TRUSTED_GPG_DIR = "/etc/apt/trusted.gpg.d/" +CLOUD_INIT_GPG_DIR = "/etc/apt/cloud-init.gpg.d/" frequency = PER_INSTANCE distros = ["ubuntu", "debian"] mirror_property = { - 'type': 'array', - 'items': { - 'type': 'object', - 'additionalProperties': False, - 'required': ['arches'], - 'properties': { - 'arches': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'minItems': 1 - }, - 'uri': { - 'type': 'string', - 'format': 'uri' - }, - 'search': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'uri' - }, - 'minItems': 1 - }, - 'search_dns': { - 'type': 'boolean', + "type": "array", + "items": { + "type": "object", + "additionalProperties": False, + "required": ["arches"], + "properties": { + "arches": { + "type": "array", + "items": {"type": "string"}, + "minItems": 1, }, - 'keyid': { - 'type': 'string' + "uri": {"type": "string", "format": "uri"}, + "search": { + "type": "array", + "items": {"type": "string", "format": "uri"}, + "minItems": 1, }, - 'key': { - 'type': 'string' + "search_dns": { + "type": "boolean", }, - 'keyserver': { - 'type': 'string' - } - } - } + "keyid": {"type": "string"}, + "key": {"type": "string"}, + "keyserver": {"type": "string"}, + }, + }, } meta = { - 'id': 'cc_apt_configure', - 'name': 'Apt 
Configure', - 'title': 'Configure apt for the user', - 'description': dedent("""\ + "id": "cc_apt_configure", + "name": "Apt Configure", + "title": "Configure apt for the user", + "description": dedent( + """\ This module handles both configuration of apt options and adding source lists. There are configuration options such as ``apt_get_wrapper`` and ``apt_get_command`` that control how @@ -94,9 +79,12 @@ meta = { .. note:: For more information about apt configuration, see the - ``Additional apt configuration`` example."""), - 'distros': distros, - 'examples': [dedent("""\ + ``Additional apt configuration`` example.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ apt: preserve_sources_list: false disable_suites: @@ -153,21 +141,24 @@ meta = { key: | ------BEGIN PGP PUBLIC KEY BLOCK------- - ------END PGP PUBLIC KEY BLOCK-------""")], - 'frequency': frequency, + ------END PGP PUBLIC KEY BLOCK-------""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'apt': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'preserve_sources_list': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "type": "object", + "properties": { + "apt": { + "type": "object", + "additionalProperties": False, + "properties": { + "preserve_sources_list": { + "type": "boolean", + "default": False, + "description": dedent( + """\ By default, cloud-init will generate a new sources list in ``/etc/apt/sources.list.d`` based on any changes specified in cloud config. To disable this @@ -179,15 +170,15 @@ schema = { all other config keys that would alter ``sources.list`` or ``sources.list.d``, **except** for additional sources to be added - to ``sources.list.d``.""") + to ``sources.list.d``.""" + ), }, - 'disable_suites': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "disable_suites": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Entries in the sources list can be disabled using ``disable_suites``, which takes a list of suites to be disabled. If the string ``$RELEASE`` is @@ -206,11 +197,13 @@ schema = { When a suite is disabled using ``disable_suites``, its entry in ``sources.list`` is not deleted; it - is just commented out.""") + is just commented out.""" + ), }, - 'primary': { + "primary": { **mirror_property, - 'description': dedent("""\ + "description": dedent( + """\ The primary and security archive mirrors can be specified using the ``primary`` and ``security`` keys, respectively. Both the @@ -264,27 +257,35 @@ schema = { ``http://archive.ubuntu.com/ubuntu``. - ``security`` => \ ``http://security.ubuntu.com/ubuntu`` - """) + """ + ), }, - 'security': { + "security": { **mirror_property, - 'description': dedent("""\ - Please refer to the primary config documentation""") + "description": dedent( + """\ + Please refer to the primary config documentation""" + ), }, - 'add_apt_repo_match': { - 'type': 'string', - 'default': ADD_APT_REPO_MATCH, - 'description': dedent("""\ + "add_apt_repo_match": { + "type": "string", + "default": ADD_APT_REPO_MATCH, + "description": dedent( + """\ All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. 
If ``add_apt_repo_match`` is not specified, it - defaults to ``{}``""".format(ADD_APT_REPO_MATCH)) + defaults to ``{}``""".format( + ADD_APT_REPO_MATCH + ) + ), }, - 'debconf_selections': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "debconf_selections": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Debconf additional configurations can be specified as a dictionary under the ``debconf_selections`` config key, with each key in the dict representing a @@ -308,11 +309,13 @@ schema = { For example: \ ``ippackage ippackage/ip string 127.0.01`` - """) + """ + ), }, - 'sources_list': { - 'type': 'string', - 'description': dedent("""\ + "sources_list": { + "type": "string", + "description": dedent( + """\ Specifies a custom template for rendering ``sources.list`` . If no ``sources_list`` template is given, cloud-init will use sane default. Within @@ -323,45 +326,55 @@ schema = { - ``$RELEASE`` - ``$PRIMARY`` - ``$SECURITY`` - - ``$KEY_FILE``""") + - ``$KEY_FILE``""" + ), }, - 'conf': { - 'type': 'string', - 'description': dedent("""\ + "conf": { + "type": "string", + "description": dedent( + """\ Specify configuration for apt, such as proxy configuration. This configuration is specified as a string. For multiline apt configuration, make sure - to follow yaml syntax.""") + to follow yaml syntax.""" + ), }, - 'https_proxy': { - 'type': 'string', - 'description': dedent("""\ + "https_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify https apt proxy. https proxy url is specified in the format - ``https://[[user][:pass]@]host[:port]/``.""") + ``https://[[user][:pass]@]host[:port]/``.""" + ), }, - 'http_proxy': { - 'type': 'string', - 'description': dedent("""\ + "http_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify http apt proxy. http proxy url is specified in the format - ``http://[[user][:pass]@]host[:port]/``.""") + ``http://[[user][:pass]@]host[:port]/``.""" + ), }, - 'proxy': { - 'type': 'string', - 'description': 'Alias for defining a http apt proxy.' + "proxy": { + "type": "string", + "description": "Alias for defining a http apt proxy.", }, - 'ftp_proxy': { - 'type': 'string', - 'description': dedent("""\ + "ftp_proxy": { + "type": "string", + "description": dedent( + """\ More convenient way to specify ftp apt proxy. 
ftp proxy url is specified in the format - ``ftp://[[user][:pass]@]host[:port]/``.""") + ``ftp://[[user][:pass]@]host[:port]/``.""" + ), }, - 'sources': { - 'type': 'object', - 'items': {'type': 'string'}, - 'description': dedent("""\ + "sources": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Source list entries can be specified as a dictionary under the ``sources`` config key, with each key in the dict representing a different source @@ -394,11 +407,12 @@ schema = { - ``$PRIMARY`` - ``$SECURITY`` - ``$RELEASE`` - - ``$KEY_FILE``""") - } - } + - ``$KEY_FILE``""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -415,18 +429,22 @@ APT_PROXY_FN = "/etc/apt/apt.conf.d/90cloud-init-aptproxy" DEFAULT_KEYSERVER = "keyserver.ubuntu.com" # Default archive mirrors -PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/", - "SECURITY": "http://security.ubuntu.com/ubuntu/"} -PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", - "SECURITY": "http://ports.ubuntu.com/ubuntu-ports"} -PRIMARY_ARCHES = ['amd64', 'i386'] -PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64'] +PRIMARY_ARCH_MIRRORS = { + "PRIMARY": "http://archive.ubuntu.com/ubuntu/", + "SECURITY": "http://security.ubuntu.com/ubuntu/", +} +PORTS_MIRRORS = { + "PRIMARY": "http://ports.ubuntu.com/ubuntu-ports", + "SECURITY": "http://ports.ubuntu.com/ubuntu-ports", +} +PRIMARY_ARCHES = ["amd64", "i386"] +PORTS_ARCHES = ["s390x", "arm64", "armhf", "powerpc", "ppc64el", "riscv64"] def get_default_mirrors(arch=None, target=None): """returns the default mirrors for the target. These depend on the - architecture, for more see: - https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" + architecture, for more see: + https://wiki.ubuntu.com/UbuntuDevelopment/PackageArchive#Ports""" if arch is None: arch = util.get_dpkg_architecture(target) if arch in PRIMARY_ARCHES: @@ -438,8 +456,8 @@ def get_default_mirrors(arch=None, target=None): def handle(name, ocfg, cloud, log, _): """process the config for apt_config. This can be called from - curthooks if a global apt config was provided or via the "apt" - standalone command.""" + curthooks if a global apt config was provided or via the "apt" + standalone command.""" # keeping code close to curtin codebase via entry handler target = None if log is not None: @@ -447,12 +465,14 @@ def handle(name, ocfg, cloud, log, _): LOG = log # feed back converted config, but only work on the subset under 'apt' ocfg = convert_to_v3_apt_format(ocfg) - cfg = ocfg.get('apt', {}) + cfg = ocfg.get("apt", {}) if not isinstance(cfg, dict): raise ValueError( "Expected dictionary for 'apt' config, found {config_type}".format( - config_type=type(cfg))) + config_type=type(cfg) + ) + ) validate_cloudconfig_schema(cfg, schema) apply_debconf_selections(cfg, target) @@ -463,7 +483,7 @@ def _should_configure_on_empty_apt(): # if no config was provided, should apt configuration be done? if util.system_is_snappy(): return False, "system is snappy." - if not (subp.which('apt-get') or subp.which('apt')): + if not (subp.which("apt-get") or subp.which("apt")): return False, "no apt commands." return True, "Apt is available." 
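As a reading aid between hunks: a minimal sketch (illustrative only, not part
of this patch) of how a v3 ``apt`` config flows through the reformatted
``handle()`` above. The ``cloud`` and ``log`` objects are assumed to come from
the usual cloud-init stage machinery, and the proxy host is a placeholder:

    # Hypothetical input; every key used here is defined in the schema above.
    ocfg = {
        "apt": {
            "preserve_sources_list": False,
            "disable_suites": ["backports", "$RELEASE-proposed"],
            "proxy": "http://proxy.example.com:3128/",
        }
    }
    handle("cc_apt_configure", ocfg, cloud, log, None)
    # handle() feeds ocfg through convert_to_v3_apt_format(), validates the
    # "apt" subtree against the schema, applies any debconf_selections, and
    # then hands the result to apply_apt(), which rewrites sources.list and
    # the proxy snippet.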
@@ -478,12 +498,12 @@ def apply_apt(cfg, cloud, target): LOG.debug("handling apt config: %s", cfg) - release = util.lsb_release(target=target)['codename'] + release = util.lsb_release(target=target)["codename"] arch = util.get_dpkg_architecture(target) mirrors = find_apt_mirror_info(cfg, cloud, arch=arch) LOG.debug("Apt Mirror info: %s", mirrors) - if util.is_false(cfg.get('preserve_sources_list', False)): + if util.is_false(cfg.get("preserve_sources_list", False)): add_mirror_keys(cfg, target) generate_sources_list(cfg, release, mirrors, cloud) rename_apt_lists(mirrors, target, arch) @@ -494,25 +514,34 @@ def apply_apt(cfg, cloud, target): LOG.exception("Failed to apply proxy or apt config info:") # Process 'apt_source -> sources {dict}' - if 'sources' in cfg: + if "sources" in cfg: params = mirrors - params['RELEASE'] = release - params['MIRROR'] = mirrors["MIRROR"] + params["RELEASE"] = release + params["MIRROR"] = mirrors["MIRROR"] matcher = None - matchcfg = cfg.get('add_apt_repo_match', ADD_APT_REPO_MATCH) + matchcfg = cfg.get("add_apt_repo_match", ADD_APT_REPO_MATCH) if matchcfg: matcher = re.compile(matchcfg).search - add_apt_sources(cfg['sources'], cloud, target=target, - template_params=params, aa_repo_match=matcher) + add_apt_sources( + cfg["sources"], + cloud, + target=target, + template_params=params, + aa_repo_match=matcher, + ) def debconf_set_selections(selections, target=None): - if not selections.endswith(b'\n'): - selections += b'\n' - subp.subp(['debconf-set-selections'], data=selections, target=target, - capture=True) + if not selections.endswith(b"\n"): + selections += b"\n" + subp.subp( + ["debconf-set-selections"], + data=selections, + target=target, + capture=True, + ) def dpkg_reconfigure(packages, target=None): @@ -532,12 +561,20 @@ def dpkg_reconfigure(packages, target=None): unhandled.append(pkg) if len(unhandled): - LOG.warning("The following packages were installed and preseeded, " - "but cannot be unconfigured: %s", unhandled) + LOG.warning( + "The following packages were installed and preseeded, " + "but cannot be unconfigured: %s", + unhandled, + ) if len(to_config): - subp.subp(['dpkg-reconfigure', '--frontend=noninteractive'] + - list(to_config), data=None, target=target, capture=True) + subp.subp( + ["dpkg-reconfigure", "--frontend=noninteractive"] + + list(to_config), + data=None, + target=target, + capture=True, + ) def apply_debconf_selections(cfg, target=None): @@ -546,13 +583,12 @@ def apply_debconf_selections(cfg, target=None): # set1: | # cloud-init cloud-init/datasources multiselect MAAS # set2: pkg pkg/value string bar - selsets = cfg.get('debconf_selections') + selsets = cfg.get("debconf_selections") if not selsets: LOG.debug("debconf_selections was not set in config") return - selections = '\n'.join( - [selsets[key] for key in sorted(selsets.keys())]) + selections = "\n".join([selsets[key] for key in sorted(selsets.keys())]) debconf_set_selections(selections.encode(), target=target) # get a complete list of packages listed in input @@ -579,7 +615,8 @@ def apply_debconf_selections(cfg, target=None): def clean_cloud_init(target): """clean out any local cloud-init config""" flist = glob.glob( - subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*")) + subp.target_path(target, "/etc/cloud/cloud.cfg.d/*dpkg*") + ) LOG.debug("cleaning cloud-init config from: %s", flist) for dpkg_cfg in flist: @@ -588,18 +625,18 @@ def clean_cloud_init(target): def mirrorurl_to_apt_fileprefix(mirror): """mirrorurl_to_apt_fileprefix - Convert a mirror url to the 
file prefix used by apt on disk to - store cache information for that mirror. - To do so do: - - take off ???:// - - drop tailing / - - convert in string / to _""" + Convert a mirror url to the file prefix used by apt on disk to + store cache information for that mirror. + To do so do: + - take off ???:// + - drop tailing / + - convert in string / to _""" string = mirror if string.endswith("/"): string = string[0:-1] pos = string.find("://") if pos >= 0: - string = string[pos + 3:] + string = string[pos + 3 :] string = string.replace("/", "_") return string @@ -631,8 +668,8 @@ def rename_apt_lists(new_mirrors, target, arch): def mirror_to_placeholder(tmpl, mirror, placeholder): """mirror_to_placeholder - replace the specified mirror in a template with a placeholder string - Checks for existance of the expected mirror and warns if not found""" + replace the specified mirror in a template with a placeholder string + Checks for existance of the expected mirror and warns if not found""" if mirror not in tmpl: LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl) return tmpl.replace(mirror, placeholder) @@ -640,13 +677,15 @@ def mirror_to_placeholder(tmpl, mirror, placeholder): def map_known_suites(suite): """there are a few default names which will be auto-extended. - This comes at the inability to use those names literally as suites, - but on the other hand increases readability of the cfg quite a lot""" - mapping = {'updates': '$RELEASE-updates', - 'backports': '$RELEASE-backports', - 'security': '$RELEASE-security', - 'proposed': '$RELEASE-proposed', - 'release': '$RELEASE'} + This comes at the inability to use those names literally as suites, + but on the other hand increases readability of the cfg quite a lot""" + mapping = { + "updates": "$RELEASE-updates", + "backports": "$RELEASE-backports", + "security": "$RELEASE-security", + "proposed": "$RELEASE-proposed", + "release": "$RELEASE", + } try: retsuite = mapping[suite] except KeyError: @@ -656,14 +695,14 @@ def map_known_suites(suite): def disable_suites(disabled, src, release): """reads the config for suites to be disabled and removes those - from the template""" + from the template""" if not disabled: return src retsrc = src for suite in disabled: suite = map_known_suites(suite) - releasesuite = templater.render_string(suite, {'RELEASE': release}) + releasesuite = templater.render_string(suite, {"RELEASE": release}) LOG.debug("Disabling suite %s as %s", suite, releasesuite) newsrc = "" @@ -685,7 +724,7 @@ def disable_suites(disabled, src, release): break if cols[pcol] == releasesuite: - line = '# suite disabled by cloud-init: %s' % line + line = "# suite disabled by cloud-init: %s" % line newsrc += line retsrc = newsrc @@ -694,36 +733,38 @@ def disable_suites(disabled, src, release): def add_mirror_keys(cfg, target): """Adds any keys included in the primary/security mirror clauses""" - for key in ('primary', 'security'): + for key in ("primary", "security"): for mirror in cfg.get(key, []): add_apt_key(mirror, target, file_name=key) def generate_sources_list(cfg, release, mirrors, cloud): """generate_sources_list - create a source.list file based on a custom or default template - by replacing mirrors and release in the template""" + create a source.list file based on a custom or default template + by replacing mirrors and release in the template""" aptsrc = "/etc/apt/sources.list" - params = {'RELEASE': release, 'codename': release} + params = {"RELEASE": release, "codename": release} for k in mirrors: params[k] = 
mirrors[k] params[k.lower()] = mirrors[k] - tmpl = cfg.get('sources_list', None) + tmpl = cfg.get("sources_list", None) if tmpl is None: LOG.info("No custom template provided, fall back to builtin") - template_fn = cloud.get_template_filename('sources.list.%s' % - (cloud.distro.name)) + template_fn = cloud.get_template_filename( + "sources.list.%s" % (cloud.distro.name) + ) if not template_fn: - template_fn = cloud.get_template_filename('sources.list') + template_fn = cloud.get_template_filename("sources.list") if not template_fn: - LOG.warning("No template found, " - "not rendering /etc/apt/sources.list") + LOG.warning( + "No template found, not rendering /etc/apt/sources.list" + ) return tmpl = util.load_file(template_fn) rendered = templater.render_string(tmpl, params) - disabled = disable_suites(cfg.get('disable_suites'), rendered, release) + disabled = disable_suites(cfg.get("disable_suites"), rendered, release) util.write_file(aptsrc, disabled, mode=0o644) @@ -735,7 +776,7 @@ def add_apt_key_raw(key, file_name, hardened=False, target=None): LOG.debug("Adding key:\n'%s'", key) try: name = pathlib.Path(file_name).stem - return apt_key('add', output_file=name, data=key, hardened=hardened) + return apt_key("add", output_file=name, data=key, hardened=hardened) except subp.ProcessExecutionError: LOG.exception("failed to add apt GPG Key to apt keyring") raise @@ -747,26 +788,26 @@ def add_apt_key(ent, target=None, hardened=False, file_name=None): Supports raw keys or keyid's The latter will as a first step fetched to get the raw key """ - if 'keyid' in ent and 'key' not in ent: + if "keyid" in ent and "key" not in ent: keyserver = DEFAULT_KEYSERVER - if 'keyserver' in ent: - keyserver = ent['keyserver'] + if "keyserver" in ent: + keyserver = ent["keyserver"] - ent['key'] = gpg.getkeybyid(ent['keyid'], keyserver) + ent["key"] = gpg.getkeybyid(ent["keyid"], keyserver) - if 'key' in ent: + if "key" in ent: return add_apt_key_raw( - ent['key'], - file_name or ent['filename'], - hardened=hardened) + ent["key"], file_name or ent["filename"], hardened=hardened + ) def update_packages(cloud): cloud.distro.update_package_sources() -def add_apt_sources(srcdict, cloud, target=None, template_params=None, - aa_repo_match=None): +def add_apt_sources( + srcdict, cloud, target=None, template_params=None, aa_repo_match=None +): """ install keys and repo source .list files defined in 'sources' @@ -795,33 +836,34 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, template_params = {} if aa_repo_match is None: - raise ValueError('did not get a valid repo matcher') + raise ValueError("did not get a valid repo matcher") if not isinstance(srcdict, dict): - raise TypeError('unknown apt format: %s' % (srcdict)) + raise TypeError("unknown apt format: %s" % (srcdict)) for filename in srcdict: ent = srcdict[filename] LOG.debug("adding source/key '%s'", ent) - if 'filename' not in ent: - ent['filename'] = filename + if "filename" not in ent: + ent["filename"] = filename - if 'source' in ent and '$KEY_FILE' in ent['source']: + if "source" in ent and "$KEY_FILE" in ent["source"]: key_file = add_apt_key(ent, target, hardened=True) - template_params['KEY_FILE'] = key_file + template_params["KEY_FILE"] = key_file else: key_file = add_apt_key(ent, target) - if 'source' not in ent: + if "source" not in ent: continue - source = ent['source'] + source = ent["source"] source = templater.render_string(source, template_params) - if not ent['filename'].startswith("/"): - ent['filename'] = 
os.path.join("/etc/apt/sources.list.d/", - ent['filename']) - if not ent['filename'].endswith(".list"): - ent['filename'] += ".list" + if not ent["filename"].startswith("/"): + ent["filename"] = os.path.join( + "/etc/apt/sources.list.d/", ent["filename"] + ) + if not ent["filename"].endswith(".list"): + ent["filename"] += ".list" if aa_repo_match(source): try: @@ -831,7 +873,7 @@ def add_apt_sources(srcdict, cloud, target=None, template_params=None, raise continue - sourcefn = subp.target_path(target, ent['filename']) + sourcefn = subp.target_path(target, ent["filename"]) try: contents = "%s\n" % (source) util.write_file(sourcefn, contents, omode="a") @@ -850,14 +892,14 @@ def convert_v1_to_v2_apt_format(srclist): if isinstance(srclist, list): LOG.debug("apt config: convert V1 to V2 format (source list to dict)") for srcent in srclist: - if 'filename' not in srcent: + if "filename" not in srcent: # file collides for multiple !filename cases for compatibility # yet we need them all processed, so not same dictionary key - srcent['filename'] = "cloud_config_sources.list" + srcent["filename"] = "cloud_config_sources.list" key = util.rand_dict_key(srcdict, "cloud_config_sources.list") else: # all with filename use that as key (matching new format) - key = srcent['filename'] + key = srcent["filename"] srcdict[key] = srcent elif isinstance(srclist, dict): srcdict = srclist @@ -869,7 +911,7 @@ def convert_v1_to_v2_apt_format(srclist): def convert_key(oldcfg, aptcfg, oldkey, newkey): """convert an old key to the new one if the old one exists - returns true if a key was found and converted""" + returns true if a key was found and converted""" if oldcfg.get(oldkey, None) is not None: aptcfg[newkey] = oldcfg.get(oldkey) del oldcfg[oldkey] @@ -879,33 +921,37 @@ def convert_key(oldcfg, aptcfg, oldkey, newkey): def convert_mirror(oldcfg, aptcfg): """convert old apt_mirror keys into the new more advanced mirror spec""" - keymap = [('apt_mirror', 'uri'), - ('apt_mirror_search', 'search'), - ('apt_mirror_search_dns', 'search_dns')] + keymap = [ + ("apt_mirror", "uri"), + ("apt_mirror_search", "search"), + ("apt_mirror_search_dns", "search_dns"), + ] converted = False - newmcfg = {'arches': ['default']} + newmcfg = {"arches": ["default"]} for oldkey, newkey in keymap: if convert_key(oldcfg, newmcfg, oldkey, newkey): converted = True # only insert new style config if anything was converted if converted: - aptcfg['primary'] = [newmcfg] + aptcfg["primary"] = [newmcfg] def convert_v2_to_v3_apt_format(oldcfg): """convert old to new keys and adapt restructured mirror spec""" - mapoldkeys = {'apt_sources': 'sources', - 'apt_mirror': None, - 'apt_mirror_search': None, - 'apt_mirror_search_dns': None, - 'apt_proxy': 'proxy', - 'apt_http_proxy': 'http_proxy', - 'apt_ftp_proxy': 'https_proxy', - 'apt_https_proxy': 'ftp_proxy', - 'apt_preserve_sources_list': 'preserve_sources_list', - 'apt_custom_sources_list': 'sources_list', - 'add_apt_repo_match': 'add_apt_repo_match'} + mapoldkeys = { + "apt_sources": "sources", + "apt_mirror": None, + "apt_mirror_search": None, + "apt_mirror_search_dns": None, + "apt_proxy": "proxy", + "apt_http_proxy": "http_proxy", + "apt_ftp_proxy": "https_proxy", + "apt_https_proxy": "ftp_proxy", + "apt_preserve_sources_list": "preserve_sources_list", + "apt_custom_sources_list": "sources_list", + "add_apt_repo_match": "add_apt_repo_match", + } needtoconvert = [] for oldkey in mapoldkeys: if oldkey in oldcfg: @@ -917,11 +963,13 @@ def convert_v2_to_v3_apt_format(oldcfg): # no old config, so no 
new one to be created if not needtoconvert: return oldcfg - LOG.debug("apt config: convert V2 to V3 format for keys '%s'", - ", ".join(needtoconvert)) + LOG.debug( + "apt config: convert V2 to V3 format for keys '%s'", + ", ".join(needtoconvert), + ) # if old AND new config are provided, prefer the new one (LP #1616831) - newaptcfg = oldcfg.get('apt', None) + newaptcfg = oldcfg.get("apt", None) if newaptcfg is not None: LOG.debug("apt config: V1/2 and V3 format specified, preferring V3") for oldkey in needtoconvert: @@ -932,10 +980,11 @@ def convert_v2_to_v3_apt_format(oldcfg): # no simple mapping or no collision on this particular key continue if verify != newaptcfg[newkey]: - raise ValueError("Old and New apt format defined with unequal " - "values %s vs %s @ %s" % (verify, - newaptcfg[newkey], - oldkey)) + raise ValueError( + "Old and New apt format defined with unequal " + "values %s vs %s @ %s" + % (verify, newaptcfg[newkey], oldkey) + ) # return conf after clearing conflicting V1/2 keys return oldcfg @@ -955,17 +1004,17 @@ def convert_v2_to_v3_apt_format(oldcfg): raise ValueError("old apt key '%s' left after conversion" % oldkey) # insert new format into config and return full cfg with only v3 content - oldcfg['apt'] = aptcfg + oldcfg["apt"] = aptcfg return oldcfg def convert_to_v3_apt_format(cfg): """convert the old list based format to the new dict based one. After that - convert the old dict keys/format to v3 a.k.a 'new apt config'""" + convert the old dict keys/format to v3 a.k.a 'new apt config'""" # V1 -> V2, the apt_sources entry from list to dict - apt_sources = cfg.get('apt_sources', None) + apt_sources = cfg.get("apt_sources", None) if apt_sources is not None: - cfg['apt_sources'] = convert_v1_to_v2_apt_format(apt_sources) + cfg["apt_sources"] = convert_v1_to_v2_apt_format(apt_sources) # V2 -> V3, move all former globals under the "apt" key # Restructure into new key names and mirror hierarchy @@ -997,7 +1046,12 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): if mydom: doms.append(".%s" % mydom) - doms.extend((".localdomain", "",)) + doms.extend( + ( + ".localdomain", + "", + ) + ) mirror_list = [] distro = cloud.distro.name @@ -1012,12 +1066,11 @@ def search_for_mirror_dns(configured, mirrortype, cfg, cloud): def update_mirror_info(pmirror, smirror, arch, cloud): """sets security mirror to primary if not defined. - returns defaults if no mirrors are defined""" + returns defaults if no mirrors are defined""" if pmirror is not None: if smirror is None: smirror = pmirror - return {'PRIMARY': pmirror, - 'SECURITY': smirror} + return {"PRIMARY": pmirror, "SECURITY": smirror} # None specified at all, get default mirrors from cloud mirror_info = cloud.datasource.get_package_mirror_info() @@ -1026,8 +1079,8 @@ def update_mirror_info(pmirror, smirror, arch, cloud): # arbitrary key/value pairs including 'primary' and 'security' keys. # caller expects dict with PRIMARY and SECURITY. 
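    # Editor's sketch (hypothetical mirror URLs, not part of this patch):
    # a datasource that returns, say,
    #     {"primary": "http://mirror.example/ubuntu",
    #      "security": "http://security.example/ubuntu"}
    # is normalized below so the same dict also carries the uppercase
    # "PRIMARY"/"SECURITY" keys that callers of get_default_mirrors() expect.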
m = mirror_info.copy() - m['PRIMARY'] = m['primary'] - m['SECURITY'] = m['security'] + m["PRIMARY"] = m["primary"] + m["SECURITY"] = m["security"] return m @@ -1037,7 +1090,7 @@ def update_mirror_info(pmirror, smirror, arch, cloud): def get_arch_mirrorconfig(cfg, mirrortype, arch): """out of a list of potential mirror configurations select - and return the one matching the architecture (or default)""" + and return the one matching the architecture (or default)""" # select the mirror specification (if-any) mirror_cfg_list = cfg.get(mirrortype, None) if mirror_cfg_list is None: @@ -1056,8 +1109,8 @@ def get_arch_mirrorconfig(cfg, mirrortype, arch): def get_mirror(cfg, mirrortype, arch, cloud): """pass the three potential stages of mirror specification - returns None is neither of them found anything otherwise the first - hit is returned""" + returns None is neither of them found anything otherwise the first + hit is returned""" mcfg = get_arch_mirrorconfig(cfg, mirrortype, arch) if mcfg is None: return None @@ -1073,18 +1126,19 @@ def get_mirror(cfg, mirrortype, arch, cloud): # fallback to search_dns if specified if mirror is None: # list of mirrors to try to resolve - mirror = search_for_mirror_dns(mcfg.get("search_dns", None), - mirrortype, cfg, cloud) + mirror = search_for_mirror_dns( + mcfg.get("search_dns", None), mirrortype, cfg, cloud + ) return mirror def find_apt_mirror_info(cfg, cloud, arch=None): """find_apt_mirror_info - find an apt_mirror given the cfg provided. - It can check for separate config of primary and security mirrors - If only primary is given security is assumed to be equal to primary - If the generic apt_mirror is given that is defining for both + find an apt_mirror given the cfg provided. + It can check for separate config of primary and security mirrors + If only primary is given security is assumed to be equal to primary + If the generic apt_mirror is given that is defining for both """ if arch is None: @@ -1105,32 +1159,35 @@ def find_apt_mirror_info(cfg, cloud, arch=None): def apply_apt_config(cfg, proxy_fname, config_fname): """apply_apt_config - Applies any apt*proxy config from if specified + Applies any apt*proxy config from if specified """ # Set up any apt proxy - cfgs = (('proxy', 'Acquire::http::Proxy "%s";'), - ('http_proxy', 'Acquire::http::Proxy "%s";'), - ('ftp_proxy', 'Acquire::ftp::Proxy "%s";'), - ('https_proxy', 'Acquire::https::Proxy "%s";')) + cfgs = ( + ("proxy", 'Acquire::http::Proxy "%s";'), + ("http_proxy", 'Acquire::http::Proxy "%s";'), + ("ftp_proxy", 'Acquire::ftp::Proxy "%s";'), + ("https_proxy", 'Acquire::https::Proxy "%s";'), + ) proxies = [fmt % cfg.get(name) for (name, fmt) in cfgs if cfg.get(name)] if len(proxies): LOG.debug("write apt proxy info to %s", proxy_fname) - util.write_file(proxy_fname, '\n'.join(proxies) + '\n') + util.write_file(proxy_fname, "\n".join(proxies) + "\n") elif os.path.isfile(proxy_fname): util.del_file(proxy_fname) LOG.debug("no apt proxy configured, removed %s", proxy_fname) - if cfg.get('conf', None): + if cfg.get("conf", None): LOG.debug("write apt config info to %s", config_fname) - util.write_file(config_fname, cfg.get('conf')) + util.write_file(config_fname, cfg.get("conf")) elif os.path.isfile(config_fname): util.del_file(config_fname) LOG.debug("no apt config configured, removed %s", config_fname) -def apt_key(command, output_file=None, data=None, hardened=False, - human_output=True): +def apt_key( + command, output_file=None, data=None, hardened=False, human_output=True +): """apt-key replacement 
commands implemented: 'add', 'list', 'finger' @@ -1153,32 +1210,36 @@ def apt_key(command, output_file=None, data=None, hardened=False, key_files = [APT_LOCAL_KEYS] if os.path.isfile(APT_LOCAL_KEYS) else [] for file in os.listdir(APT_TRUSTED_GPG_DIR): - if file.endswith('.gpg') or file.endswith('.asc'): + if file.endswith(".gpg") or file.endswith(".asc"): key_files.append(APT_TRUSTED_GPG_DIR + file) - return key_files if key_files else '' + return key_files if key_files else "" def apt_key_add(): """apt-key add returns filepath to new keyring, or '/dev/null' when an error occurs """ - file_name = '/dev/null' + file_name = "/dev/null" if not output_file: util.logexc( - LOG, 'Unknown filename, failed to add key: "{}"'.format(data)) + LOG, 'Unknown filename, failed to add key: "{}"'.format(data) + ) else: try: - key_dir = \ + key_dir = ( CLOUD_INIT_GPG_DIR if hardened else APT_TRUSTED_GPG_DIR + ) stdout = gpg.dearmor(data) - file_name = '{}{}.gpg'.format(key_dir, output_file) + file_name = "{}{}.gpg".format(key_dir, output_file) util.write_file(file_name, stdout) except subp.ProcessExecutionError: - util.logexc(LOG, 'Gpg error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Gpg error, failed to add key: {}".format(data) + ) except UnicodeDecodeError: - util.logexc(LOG, 'Decode error, failed to add key: {}'.format( - data)) + util.logexc( + LOG, "Decode error, failed to add key: {}".format(data) + ) return file_name def apt_key_list(): @@ -1193,19 +1254,20 @@ def apt_key(command, output_file=None, data=None, hardened=False, key_list.append(gpg.list(key_file, human_output=human_output)) except subp.ProcessExecutionError as error: LOG.warning('Failed to list key "%s": %s', key_file, error) - return '\n'.join(key_list) + return "\n".join(key_list) - if command == 'add': + if command == "add": return apt_key_add() - elif command == 'finger' or command == 'list': + elif command == "finger" or command == "list": return apt_key_list() else: raise ValueError( - 'apt_key() commands add, list, and finger are currently supported') + "apt_key() commands add, list, and finger are currently supported" + ) CONFIG_CLEANERS = { - 'cloud-init': clean_cloud_init, + "cloud-init": clean_cloud_init, } # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index aa186ce2..569849d1 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -29,17 +29,19 @@ not recommended. apt_pipelining: """ -from cloudinit.settings import PER_INSTANCE from cloudinit import util +from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining" -APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" - 'Acquire::http::Pipeline-Depth "%s";\n') +APT_PIPE_TPL = ( + "//Written by cloud-init per 'apt_pipelining'\n" + 'Acquire::http::Pipeline-Depth "%s";\n' +) # Acquire::http::Pipeline-Depth can be a value # from 0 to 5 indicating how many outstanding requests APT should send. 
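Between these hunks, a concrete rendering may help: the sketch below
(illustrative, not from this patch) shows what ``write_apt_snippet()``
produces for ``apt_pipelining: 3``, assuming the module-level
``APT_PIPE_TPL`` and ``DEFAULT_FILE`` defined above:

    file_contents = APT_PIPE_TPL % (3)
    util.write_file(DEFAULT_FILE, file_contents)
    # /etc/apt/apt.conf.d/90cloud-init-pipelining then contains:
    #   //Written by cloud-init per 'apt_pipelining'
    #   Acquire::http::Pipeline-Depth "3";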
@@ -49,7 +51,7 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" def handle(_name, cfg, _cloud, log, _args): - apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", 'os') + apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", "os") apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": @@ -69,4 +71,5 @@ def write_apt_snippet(setting, log, f_name): util.write_file(f_name, file_contents) log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index 06f7a26e..bff11a24 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -12,11 +12,9 @@ import os from textwrap import dedent +from cloudinit import subp, temp_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS -from cloudinit import temp_utils -from cloudinit import subp -from cloudinit import util frequency = PER_ALWAYS @@ -26,13 +24,14 @@ frequency = PER_ALWAYS # configuration options before actually attempting to deploy with said # configuration. -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_bootcmd', - 'name': 'Bootcmd', - 'title': 'Run arbitrary commands early in the boot process', - 'description': dedent("""\ + "id": "cc_bootcmd", + "name": "Bootcmd", + "title": "Run arbitrary commands early in the boot process", + "description": dedent( + """\ This module runs arbitrary commands very early in the boot process, only slightly after a boothook would run. This is very similar to a boothook, but more user friendly. The environment variable @@ -48,31 +47,37 @@ meta = { when writing files, do not use /tmp dir as it races with systemd-tmpfiles-clean LP: #1707222. Use /run/somedir instead. 
- """), - 'distros': distros, - 'examples': [dedent("""\ + """ + ), + "distros": distros, + "examples": [ + dedent( + """\ bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] - """)], - 'frequency': PER_ALWAYS, + """ + ) + ], + "frequency": PER_ALWAYS, } schema = { - 'type': 'object', - 'properties': { - 'bootcmd': { - 'type': 'array', - 'items': { - 'oneOf': [ - {'type': 'array', 'items': {'type': 'string'}}, - {'type': 'string'}] + "type": "object", + "properties": { + "bootcmd": { + "type": "array", + "items": { + "oneOf": [ + {"type": "array", "items": {"type": "string"}}, + {"type": "string"}, + ] }, - 'additionalItems': False, # Reject items of non-string non-list - 'additionalProperties': False, - 'minItems': 1, + "additionalItems": False, # Reject items of non-string non-list + "additionalProperties": False, + "minItems": 1, } - } + }, } __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -81,8 +86,9 @@ __doc__ = get_meta_doc(meta, schema) # Supplement python help() def handle(name, cfg, cloud, log, _args): if "bootcmd" not in cfg: - log.debug(("Skipping module named %s," - " no 'bootcmd' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'bootcmd' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) @@ -99,11 +105,12 @@ def handle(name, cfg, cloud, log, _args): env = os.environ.copy() iid = cloud.get_instance_id() if iid: - env['INSTANCE_ID'] = str(iid) - cmd = ['/bin/sh', tmpf.name] + env["INSTANCE_ID"] = str(iid) + cmd = ["/bin/sh", tmpf.name] subp.subp(cmd, env=env, capture=False) except Exception: util.logexc(log, "Failed to run bootcmd module %s", name) raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index 9fdaeba1..53b6d0c8 100755 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -38,11 +38,10 @@ Valid configuration options for this module are: byobu_by_default: """ +from cloudinit import subp, util from cloudinit.distros import ug_util -from cloudinit import subp -from cloudinit import util -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def handle(name, cfg, cloud, log, args): @@ -58,8 +57,14 @@ def handle(name, cfg, cloud, log, args): if value == "user" or value == "system": value = "enable-%s" % value - valid = ("enable-user", "enable-system", "enable", - "disable-user", "disable-system", "disable") + valid = ( + "enable-user", + "enable-system", + "enable", + "disable-user", + "disable-system", + "disable", + ) if value not in valid: log.warning("Unknown value %s for byobu_by_default", value) @@ -81,13 +86,16 @@ def handle(name, cfg, cloud, log, args): (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) (user, _user_config) = ug_util.extract_default(users) if not user: - log.warning(("No default byobu user provided, " - "can not launch %s for the default user"), bl_inst) + log.warning( + "No default byobu user provided, " + "can not launch %s for the default user", + bl_inst, + ) else: - shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) + shcmd += ' sudo -Hu "%s" byobu-launcher-%s' % (user, bl_inst) shcmd += " || X=$(($X+1)); " if mod_sys: - shcmd += "echo \"%s\" | debconf-set-selections" % dc_val + shcmd += 'echo "%s" | debconf-set-selections' % dc_val shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive" shcmd += " || X=$(($X+1)); " @@ -96,4 +104,5 @@ def handle(name, cfg, cloud, log, args): 
log.debug("Setting byobu to %s", value) subp.subp(cmd, capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index bd7bead9..9de065ab 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -41,28 +41,27 @@ can be removed from the system with the configuration option import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util DEFAULT_CONFIG = { - 'ca_cert_path': '/usr/share/ca-certificates/', - 'ca_cert_filename': 'cloud-init-ca-certs.crt', - 'ca_cert_config': '/etc/ca-certificates.conf', - 'ca_cert_system_path': '/etc/ssl/certs/', - 'ca_cert_update_cmd': ['update-ca-certificates'] + "ca_cert_path": "/usr/share/ca-certificates/", + "ca_cert_filename": "cloud-init-ca-certs.crt", + "ca_cert_config": "/etc/ca-certificates.conf", + "ca_cert_system_path": "/etc/ssl/certs/", + "ca_cert_update_cmd": ["update-ca-certificates"], } DISTRO_OVERRIDES = { - 'rhel': { - 'ca_cert_path': '/usr/share/pki/ca-trust-source/', - 'ca_cert_filename': 'anchors/cloud-init-ca-certs.crt', - 'ca_cert_config': None, - 'ca_cert_system_path': '/etc/pki/ca-trust/', - 'ca_cert_update_cmd': ['update-ca-trust'] + "rhel": { + "ca_cert_path": "/usr/share/pki/ca-trust-source/", + "ca_cert_filename": "anchors/cloud-init-ca-certs.crt", + "ca_cert_config": None, + "ca_cert_system_path": "/etc/pki/ca-trust/", + "ca_cert_update_cmd": ["update-ca-trust"], } } -distros = ['alpine', 'debian', 'ubuntu', 'rhel'] +distros = ["alpine", "debian", "ubuntu", "rhel"] def _distro_ca_certs_configs(distro_name): @@ -72,8 +71,9 @@ def _distro_ca_certs_configs(distro_name): @returns: Dict of distro configurations for ca-cert. """ cfg = DISTRO_OVERRIDES.get(distro_name, DEFAULT_CONFIG) - cfg['ca_cert_full_path'] = os.path.join(cfg['ca_cert_path'], - cfg['ca_cert_filename']) + cfg["ca_cert_full_path"] = os.path.join( + cfg["ca_cert_path"], cfg["ca_cert_filename"] + ) return cfg @@ -83,7 +83,7 @@ def update_ca_certs(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - subp.subp(distro_cfg['ca_cert_update_cmd'], capture=False) + subp.subp(distro_cfg["ca_cert_update_cmd"], capture=False) def add_ca_certs(distro_cfg, certs): @@ -98,9 +98,9 @@ def add_ca_certs(distro_cfg, certs): return # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) - util.write_file(distro_cfg['ca_cert_full_path'], - cert_file_contents, - mode=0o644) + util.write_file( + distro_cfg["ca_cert_full_path"], cert_file_contents, mode=0o644 + ) update_cert_config(distro_cfg) @@ -110,23 +110,27 @@ def update_cert_config(distro_cfg): @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - if distro_cfg['ca_cert_config'] is None: + if distro_cfg["ca_cert_config"] is None: return - if os.stat(distro_cfg['ca_cert_config']).st_size == 0: + if os.stat(distro_cfg["ca_cert_config"]).st_size == 0: # If the CA_CERT_CONFIG file is empty (i.e. all existing # CA certs have been deleted) then simply output a single # line with the cloud-init cert filename. - out = "%s\n" % distro_cfg['ca_cert_filename'] + out = "%s\n" % distro_cfg["ca_cert_filename"] else: # Append cert filename to CA_CERT_CONFIG file. # We have to strip the content because blank lines in the file # causes subsequent entries to be ignored. 
(LP: #1077020) - orig = util.load_file(distro_cfg['ca_cert_config']) - cr_cont = '\n'.join([line for line in orig.splitlines() - if line != distro_cfg['ca_cert_filename']]) - out = "%s\n%s\n" % (cr_cont.rstrip(), - distro_cfg['ca_cert_filename']) - util.write_file(distro_cfg['ca_cert_config'], out, omode="wb") + orig = util.load_file(distro_cfg["ca_cert_config"]) + cr_cont = "\n".join( + [ + line + for line in orig.splitlines() + if line != distro_cfg["ca_cert_filename"] + ] + ) + out = "%s\n%s\n" % (cr_cont.rstrip(), distro_cfg["ca_cert_filename"]) + util.write_file(distro_cfg["ca_cert_config"], out, omode="wb") def remove_default_ca_certs(distro_name, distro_cfg): @@ -137,14 +141,15 @@ def remove_default_ca_certs(distro_name, distro_cfg): @param distro_name: String providing the distro class name. @param distro_cfg: A hash providing _distro_ca_certs_configs function. """ - util.delete_dir_contents(distro_cfg['ca_cert_path']) - util.delete_dir_contents(distro_cfg['ca_cert_system_path']) - util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644) + util.delete_dir_contents(distro_cfg["ca_cert_path"]) + util.delete_dir_contents(distro_cfg["ca_cert_system_path"]) + util.write_file(distro_cfg["ca_cert_config"], "", mode=0o644) - if distro_name in ['debian', 'ubuntu']: + if distro_name in ["debian", "ubuntu"]: debconf_sel = ( - "ca-certificates ca-certificates/trust_new_crts " + "select no") - subp.subp(('debconf-set-selections', '-'), debconf_sel) + "ca-certificates ca-certificates/trust_new_crts " + "select no" + ) + subp.subp(("debconf-set-selections", "-"), debconf_sel) def handle(name, cfg, cloud, log, _args): @@ -159,11 +164,13 @@ def handle(name, cfg, cloud, log, _args): """ # If there isn't a ca-certs section in the configuration don't do anything if "ca-certs" not in cfg: - log.debug(("Skipping module named %s," - " no 'ca-certs' key in configuration"), name) + log.debug( + "Skipping module named %s, no 'ca-certs' key in configuration", + name, + ) return - ca_cert_cfg = cfg['ca-certs'] + ca_cert_cfg = cfg["ca-certs"] distro_cfg = _distro_ca_certs_configs(cloud.distro.name) # If there is a remove-defaults option set to true, remove the system @@ -183,4 +190,5 @@ def handle(name, cfg, cloud, log, _args): log.debug("Updating certificates") update_ca_certs(distro_cfg) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index ed734d1c..67889683 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -13,87 +13,91 @@ import json import os from textwrap import dedent -from cloudinit import subp +from cloudinit import subp, temp_utils, templater, url_helper, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema -from cloudinit import templater -from cloudinit import temp_utils -from cloudinit import url_helper -from cloudinit import util from cloudinit.settings import PER_ALWAYS - RUBY_VERSION_DEFAULT = "1.8" -CHEF_DIRS = tuple([ - '/etc/chef', - '/var/log/chef', - '/var/lib/chef', - '/var/cache/chef', - '/var/backups/chef', - '/var/run/chef', -]) -REQUIRED_CHEF_DIRS = tuple([ - '/etc/chef', -]) +CHEF_DIRS = tuple( + [ + "/etc/chef", + "/var/log/chef", + "/var/lib/chef", + "/var/cache/chef", + "/var/backups/chef", + "/var/run/chef", + ] +) +REQUIRED_CHEF_DIRS = tuple( + [ + "/etc/chef", + ] +) # Used if fetching chef from a omnibus style package OMNIBUS_URL = "https://www.chef.io/chef/install.sh" OMNIBUS_URL_RETRIES = 5 -CHEF_VALIDATION_PEM_PATH = '/etc/chef/validation.pem' 
-CHEF_ENCRYPTED_DATA_BAG_PATH = '/etc/chef/encrypted_data_bag_secret' -CHEF_ENVIRONMENT = '_default' -CHEF_FB_PATH = '/etc/chef/firstboot.json' +CHEF_VALIDATION_PEM_PATH = "/etc/chef/validation.pem" +CHEF_ENCRYPTED_DATA_BAG_PATH = "/etc/chef/encrypted_data_bag_secret" +CHEF_ENVIRONMENT = "_default" +CHEF_FB_PATH = "/etc/chef/firstboot.json" CHEF_RB_TPL_DEFAULTS = { # These are ruby symbols... - 'ssl_verify_mode': ':verify_none', - 'log_level': ':info', + "ssl_verify_mode": ":verify_none", + "log_level": ":info", # These are not symbols... - 'log_location': '/var/log/chef/client.log', - 'validation_key': CHEF_VALIDATION_PEM_PATH, - 'validation_cert': None, - 'client_key': '/etc/chef/client.pem', - 'json_attribs': CHEF_FB_PATH, - 'file_cache_path': '/var/cache/chef', - 'file_backup_path': '/var/backups/chef', - 'pid_file': '/var/run/chef/client.pid', - 'show_time': True, - 'encrypted_data_bag_secret': None, + "log_location": "/var/log/chef/client.log", + "validation_key": CHEF_VALIDATION_PEM_PATH, + "validation_cert": None, + "client_key": "/etc/chef/client.pem", + "json_attribs": CHEF_FB_PATH, + "file_cache_path": "/var/cache/chef", + "file_backup_path": "/var/backups/chef", + "pid_file": "/var/run/chef/client.pid", + "show_time": True, + "encrypted_data_bag_secret": None, } -CHEF_RB_TPL_BOOL_KEYS = frozenset(['show_time']) -CHEF_RB_TPL_PATH_KEYS = frozenset([ - 'log_location', - 'validation_key', - 'client_key', - 'file_cache_path', - 'json_attribs', - 'pid_file', - 'encrypted_data_bag_secret', -]) +CHEF_RB_TPL_BOOL_KEYS = frozenset(["show_time"]) +CHEF_RB_TPL_PATH_KEYS = frozenset( + [ + "log_location", + "validation_key", + "client_key", + "file_cache_path", + "json_attribs", + "pid_file", + "encrypted_data_bag_secret", + ] +) CHEF_RB_TPL_KEYS = list(CHEF_RB_TPL_DEFAULTS.keys()) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_BOOL_KEYS) CHEF_RB_TPL_KEYS.extend(CHEF_RB_TPL_PATH_KEYS) -CHEF_RB_TPL_KEYS.extend([ - 'server_url', - 'node_name', - 'environment', - 'validation_name', - 'chef_license', -]) +CHEF_RB_TPL_KEYS.extend( + [ + "server_url", + "node_name", + "environment", + "validation_name", + "chef_license", + ] +) CHEF_RB_TPL_KEYS = frozenset(CHEF_RB_TPL_KEYS) -CHEF_RB_PATH = '/etc/chef/client.rb' -CHEF_EXEC_PATH = '/usr/bin/chef-client' -CHEF_EXEC_DEF_ARGS = tuple(['-d', '-i', '1800', '-s', '20']) +CHEF_RB_PATH = "/etc/chef/client.rb" +CHEF_EXEC_PATH = "/usr/bin/chef-client" +CHEF_EXEC_DEF_ARGS = tuple(["-d", "-i", "1800", "-s", "20"]) frequency = PER_ALWAYS distros = ["all"] meta = { - 'id': 'cc_chef', - 'name': 'Chef', - 'title': 'module that configures, starts and installs chef', - 'description': dedent("""\ + "id": "cc_chef", + "name": "Chef", + "title": "module that configures, starts and installs chef", + "description": dedent( + """\ This module enables chef to be installed (from packages, gems, or from omnibus). Before this occurs, chef configuration is written to disk (validation.pem, client.pem, firstboot.json, @@ -101,9 +105,12 @@ meta = { /var/log/chef and so-on). If configured, chef will be installed and started in either daemon or non-daemon mode. 
If run in non-daemon mode, post run actions are executed to do - finishing activities such as removing validation.pem."""), - 'distros': distros, - 'examples': [dedent(""" + finishing activities such as removing validation.pem.""" + ), + "distros": distros, + "examples": [ + dedent( + """ chef: directories: - /etc/chef @@ -124,180 +131,237 @@ meta = { omnibus_url_retries: 2 server_url: https://chef.yourorg.com:4000 ssl_verify_mode: :verify_peer - validation_name: yourorg-validator""")], - 'frequency': frequency, + validation_name: yourorg-validator""" + ) + ], + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'chef': { - 'type': 'object', - 'additionalProperties': False, - 'properties': { - 'directories': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "chef": { + "type": "object", + "additionalProperties": False, + "properties": { + "directories": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": True, + "description": dedent( + """\ Create the necessary directories for chef to run. By default, it creates the following directories: - {chef_dirs}""").format( + {chef_dirs}""" + ).format( chef_dirs="\n".join( [" - ``{}``".format(d) for d in CHEF_DIRS] ) - ) + ), }, - 'validation_cert': { - 'type': 'string', - 'description': dedent("""\ + "validation_cert": { + "type": "string", + "description": dedent( + """\ Optional string to be written to file validation_key. Special value ``system`` means set use existing file. - """) + """ + ), }, - 'validation_key': { - 'type': 'string', - 'default': CHEF_VALIDATION_PEM_PATH, - 'description': dedent("""\ + "validation_key": { + "type": "string", + "default": CHEF_VALIDATION_PEM_PATH, + "description": dedent( + """\ Optional path for validation_cert. default to - ``{}``.""".format(CHEF_VALIDATION_PEM_PATH)) + ``{}``.""".format( + CHEF_VALIDATION_PEM_PATH + ) + ), }, - 'firstboot_path': { - 'type': 'string', - 'default': CHEF_FB_PATH, - 'description': dedent("""\ + "firstboot_path": { + "type": "string", + "default": CHEF_FB_PATH, + "description": dedent( + """\ Path to write run_list and initial_attributes keys that should also be present in this configuration, defaults - to ``{}``.""".format(CHEF_FB_PATH)) + to ``{}``.""".format( + CHEF_FB_PATH + ) + ), }, - 'exec': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "exec": { + "type": "boolean", + "default": False, + "description": dedent( + """\ define if we should run or not run chef (defaults to false, unless a gem installed is requested where this - will then default to true).""") + will then default to true).""" + ), }, - 'client_key': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['client_key'], - 'description': dedent("""\ + "client_key": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["client_key"], + "description": dedent( + """\ Optional path for client_cert. default to - ``{}``.""".format(CHEF_RB_TPL_DEFAULTS['client_key'])) + ``{}``.""".format( + CHEF_RB_TPL_DEFAULTS["client_key"] + ) + ), }, - 'encrypted_data_bag_secret': { - 'type': 'string', - 'default': None, - 'description': dedent("""\ + "encrypted_data_bag_secret": { + "type": "string", + "default": None, + "description": dedent( + """\ Specifies the location of the secret key used by chef to encrypt data items. By default, this path is set to None, meaning that chef will have to look at the path ``{}`` for it. 
- """.format(CHEF_ENCRYPTED_DATA_BAG_PATH)) + """.format( + CHEF_ENCRYPTED_DATA_BAG_PATH + ) + ), }, - 'environment': { - 'type': 'string', - 'default': CHEF_ENVIRONMENT, - 'description': dedent("""\ + "environment": { + "type": "string", + "default": CHEF_ENVIRONMENT, + "description": dedent( + """\ Specifies which environment chef will use. By default, it will use the ``{}`` configuration. - """.format(CHEF_ENVIRONMENT)) + """.format( + CHEF_ENVIRONMENT + ) + ), }, - 'file_backup_path': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['file_backup_path'], - 'description': dedent("""\ + "file_backup_path": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["file_backup_path"], + "description": dedent( + """\ Specifies the location in which backup files are stored. By default, it uses the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['file_backup_path'])) + CHEF_RB_TPL_DEFAULTS["file_backup_path"] + ) + ), }, - 'file_cache_path': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['file_cache_path'], - 'description': dedent("""\ + "file_cache_path": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["file_cache_path"], + "description": dedent( + """\ Specifies the location in which chef cache files will be saved. By default, it uses the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['file_cache_path'])) + CHEF_RB_TPL_DEFAULTS["file_cache_path"] + ) + ), }, - 'json_attribs': { - 'type': 'string', - 'default': CHEF_FB_PATH, - 'description': dedent("""\ + "json_attribs": { + "type": "string", + "default": CHEF_FB_PATH, + "description": dedent( + """\ Specifies the location in which some chef json data is stored. By default, it uses the - ``{}`` location.""".format(CHEF_FB_PATH)) + ``{}`` location.""".format( + CHEF_FB_PATH + ) + ), }, - 'log_level': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['log_level'], - 'description': dedent("""\ + "log_level": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["log_level"], + "description": dedent( + """\ Defines the level of logging to be stored in the log file. By default this value is set to ``{}``. - """.format(CHEF_RB_TPL_DEFAULTS['log_level'])) + """.format( + CHEF_RB_TPL_DEFAULTS["log_level"] + ) + ), }, - 'log_location': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['log_location'], - 'description': dedent("""\ + "log_location": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["log_location"], + "description": dedent( + """\ Specifies the location of the chef lof file. By default, the location is specified at ``{}``.""".format( - CHEF_RB_TPL_DEFAULTS['log_location'])) + CHEF_RB_TPL_DEFAULTS["log_location"] + ) + ), }, - 'node_name': { - 'type': 'string', - 'description': dedent("""\ + "node_name": { + "type": "string", + "description": dedent( + """\ The name of the node to run. By default, we will - use th instance id as the node name.""") + use th instance id as the node name.""" + ), }, - 'omnibus_url': { - 'type': 'string', - 'default': OMNIBUS_URL, - 'description': dedent("""\ + "omnibus_url": { + "type": "string", + "default": OMNIBUS_URL, + "description": dedent( + """\ Omnibus URL if chef should be installed through Omnibus. 
By default, it uses the - ``{}``.""".format(OMNIBUS_URL)) + ``{}``.""".format( + OMNIBUS_URL + ) + ), }, - 'omnibus_url_retries': { - 'type': 'integer', - 'default': OMNIBUS_URL_RETRIES, - 'description': dedent("""\ + "omnibus_url_retries": { + "type": "integer", + "default": OMNIBUS_URL_RETRIES, + "description": dedent( + """\ The number of retries that will be attempted to reach - the Omnibus URL""") + the Omnibus URL""" + ), }, - 'omnibus_version': { - 'type': 'string', - 'description': dedent("""\ + "omnibus_version": { + "type": "string", + "description": dedent( + """\ Optional version string to require for omnibus - install.""") + install.""" + ), }, - 'pid_file': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['pid_file'], - 'description': dedent("""\ + "pid_file": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["pid_file"], + "description": dedent( + """\ The location in which a process identification number (pid) is saved. By default, it saves in the ``{}`` location.""".format( - CHEF_RB_TPL_DEFAULTS['pid_file'])) + CHEF_RB_TPL_DEFAULTS["pid_file"] + ) + ), }, - 'server_url': { - 'type': 'string', - 'description': 'The URL for the chef server' + "server_url": { + "type": "string", + "description": "The URL for the chef server", }, - 'show_time': { - 'type': 'boolean', - 'default': True, - 'description': 'Show time in chef logs' + "show_time": { + "type": "boolean", + "default": True, + "description": "Show time in chef logs", }, - 'ssl_verify_mode': { - 'type': 'string', - 'default': CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'], - 'description': dedent("""\ + "ssl_verify_mode": { + "type": "string", + "default": CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"], + "description": dedent( + """\ Set the verify mode for HTTPS requests. We can have two possible values for this parameter: @@ -306,67 +370,76 @@ schema = { - ``:verify_peer``: Validate all SSL certificates. By default, the parameter is set as ``{}``. - """.format(CHEF_RB_TPL_DEFAULTS['ssl_verify_mode'])) + """.format( + CHEF_RB_TPL_DEFAULTS["ssl_verify_mode"] + ) + ), }, - 'validation_name': { - 'type': 'string', - 'description': dedent("""\ + "validation_name": { + "type": "string", + "description": dedent( + """\ The name of the chef-validator key that Chef Infra Client uses to access the Chef Infra Server during - the initial Chef Infra Client run.""") + the initial Chef Infra Client run.""" + ), }, - 'force_install': { - 'type': 'boolean', - 'default': False, - 'description': dedent("""\ + "force_install": { + "type": "boolean", + "default": False, + "description": dedent( + """\ If set to ``True``, forces chef installation, even - if it is already installed.""") + if it is already installed.""" + ), }, - 'initial_attributes': { - 'type': 'object', - 'items': { - 'type': 'string' - }, - 'description': dedent("""\ + "initial_attributes": { + "type": "object", + "items": {"type": "string"}, + "description": dedent( + """\ Specify a list of initial attributes used by the - cookbooks.""") + cookbooks.""" + ), }, - 'install_type': { - 'type': 'string', - 'default': 'packages', - 'description': dedent("""\ + "install_type": { + "type": "string", + "default": "packages", + "description": dedent( + """\ The type of installation for chef. It can be one of the following values: - ``packages`` - ``gems`` - - ``omnibus``""") + - ``omnibus``""" + ), }, - 'run_list': { - 'type': 'array', - 'items': { - 'type': 'string' - }, - 'description': 'A run list for a first boot json.' 
+ "run_list": { + "type": "array", + "items": {"type": "string"}, + "description": "A run list for a first boot json.", }, "chef_license": { - 'type': 'string', - 'description': dedent("""\ + "type": "string", + "description": dedent( + """\ string that indicates if user accepts or not license - related to some of chef products""") - } - } + related to some of chef products""" + ), + }, + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) def post_run_chef(chef_cfg, log): - delete_pem = util.get_cfg_option_bool(chef_cfg, - 'delete_validation_post_exec', - default=False) + delete_pem = util.get_cfg_option_bool( + chef_cfg, "delete_validation_post_exec", default=False + ) if delete_pem and os.path.isfile(CHEF_VALIDATION_PEM_PATH): os.unlink(CHEF_VALIDATION_PEM_PATH) @@ -389,16 +462,20 @@ def get_template_params(iid, chef_cfg, log): else: params[k] = util.get_cfg_option_str(chef_cfg, k) # These ones are overwritten to be exact values... - params.update({ - 'generated_by': util.make_header(), - 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', - default=iid), - 'environment': util.get_cfg_option_str(chef_cfg, 'environment', - default='_default'), - # These two are mandatory... - 'server_url': chef_cfg['server_url'], - 'validation_name': chef_cfg['validation_name'], - }) + params.update( + { + "generated_by": util.make_header(), + "node_name": util.get_cfg_option_str( + chef_cfg, "node_name", default=iid + ), + "environment": util.get_cfg_option_str( + chef_cfg, "environment", default="_default" + ), + # These two are mandatory... + "server_url": chef_cfg["server_url"], + "validation_name": chef_cfg["validation_name"], + } + ) return params @@ -406,35 +483,38 @@ def handle(name, cfg, cloud, log, _args): """Handler method activated by cloud-init.""" # If there isn't a chef key in the configuration don't do anything - if 'chef' not in cfg: - log.debug(("Skipping module named %s," - " no 'chef' key in configuration"), name) + if "chef" not in cfg: + log.debug( + "Skipping module named %s, no 'chef' key in configuration", name + ) return validate_cloudconfig_schema(cfg, schema) - chef_cfg = cfg['chef'] + chef_cfg = cfg["chef"] # Ensure the chef directories we use exist - chef_dirs = util.get_cfg_option_list(chef_cfg, 'directories') + chef_dirs = util.get_cfg_option_list(chef_cfg, "directories") if not chef_dirs: chef_dirs = list(CHEF_DIRS) for d in itertools.chain(chef_dirs, REQUIRED_CHEF_DIRS): util.ensure_dir(d) - vkey_path = chef_cfg.get('validation_key', CHEF_VALIDATION_PEM_PATH) - vcert = chef_cfg.get('validation_cert') + vkey_path = chef_cfg.get("validation_key", CHEF_VALIDATION_PEM_PATH) + vcert = chef_cfg.get("validation_cert") # special value 'system' means do not overwrite the file # but still render the template to contain 'validation_key' if vcert: if vcert != "system": util.write_file(vkey_path, vcert) elif not os.path.isfile(vkey_path): - log.warning("chef validation_cert provided as 'system', but " - "validation_key path '%s' does not exist.", - vkey_path) + log.warning( + "chef validation_cert provided as 'system', but " + "validation_key path '%s' does not exist.", + vkey_path, + ) # Create the chef config from template - template_fn = cloud.get_template_filename('chef_client.rb') + template_fn = cloud.get_template_filename("chef_client.rb") if template_fn: iid = str(cloud.datasource.get_instance_id()) params = get_template_params(iid, chef_cfg, log) @@ -448,32 +528,33 @@ def handle(name, cfg, cloud, log, _args): util.ensure_dirs(param_paths) 
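
For orientation, a small sketch of what get_template_params() above returns, assuming the module defaults; the instance id and URL are hypothetical:

    import logging

    params = get_template_params(
        "i-0123456789",  # instance id, used as the node_name fallback
        {
            "server_url": "https://chef.example.com",
            "validation_name": "org-validator",
        },
        logging.getLogger(__name__),
    )
    # node_name falls back to the instance id, environment to "_default";
    # server_url and validation_name are mandatory and copied verbatim.
    assert params["node_name"] == "i-0123456789"
    assert params["environment"] == "_default"
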
templater.render_to_file(template_fn, CHEF_RB_PATH, params) else: - log.warning("No template found, not rendering to %s", - CHEF_RB_PATH) + log.warning("No template found, not rendering to %s", CHEF_RB_PATH) # Set the firstboot json - fb_filename = util.get_cfg_option_str(chef_cfg, 'firstboot_path', - default=CHEF_FB_PATH) + fb_filename = util.get_cfg_option_str( + chef_cfg, "firstboot_path", default=CHEF_FB_PATH + ) if not fb_filename: log.info("First boot path empty, not writing first boot json file") else: initial_json = {} - if 'run_list' in chef_cfg: - initial_json['run_list'] = chef_cfg['run_list'] - if 'initial_attributes' in chef_cfg: - initial_attributes = chef_cfg['initial_attributes'] + if "run_list" in chef_cfg: + initial_json["run_list"] = chef_cfg["run_list"] + if "initial_attributes" in chef_cfg: + initial_attributes = chef_cfg["initial_attributes"] for k in list(initial_attributes.keys()): initial_json[k] = initial_attributes[k] util.write_file(fb_filename, json.dumps(initial_json)) # Try to install chef, if its not already installed... - force_install = util.get_cfg_option_bool(chef_cfg, - 'force_install', default=False) + force_install = util.get_cfg_option_bool( + chef_cfg, "force_install", default=False + ) installed = subp.is_exe(CHEF_EXEC_PATH) if not installed or force_install: run = install_chef(cloud, chef_cfg, log) elif installed: - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) else: run = False if run: @@ -482,18 +563,21 @@ def handle(name, cfg, cloud, log, _args): def run_chef(chef_cfg, log): - log.debug('Running chef-client') + log.debug("Running chef-client") cmd = [CHEF_EXEC_PATH] - if 'exec_arguments' in chef_cfg: - cmd_args = chef_cfg['exec_arguments'] + if "exec_arguments" in chef_cfg: + cmd_args = chef_cfg["exec_arguments"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.append(cmd_args) else: - log.warning("Unknown type %s provided for chef" - " 'exec_arguments' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for chef" + " 'exec_arguments' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(CHEF_EXEC_DEF_ARGS) else: cmd.extend(CHEF_EXEC_DEF_ARGS) @@ -507,16 +591,16 @@ def subp_blob_in_tempfile(blob, *args, **kwargs): The 'args' argument to subp will be updated with the full path to the filename as the first argument. 
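
A short sketch of the command line run_chef() above assembles; the exec_arguments value is hypothetical, the rest are the module constants:

    # With chef_cfg = {"exec_arguments": "--once"}, run_chef builds:
    cmd = [CHEF_EXEC_PATH, "--once"] + list(CHEF_EXEC_DEF_ARGS)
    # i.e. ["/usr/bin/chef-client", "--once", "-d", "-i", "1800", "-s", "20"]
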
""" - basename = kwargs.pop('basename', "subp_blob") + basename = kwargs.pop("basename", "subp_blob") - if len(args) == 0 and 'args' not in kwargs: + if len(args) == 0 and "args" not in kwargs: args = [tuple()] # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: tmpf = os.path.join(tmpd, basename) - if 'args' in kwargs: - kwargs['args'] = [tmpf] + list(kwargs['args']) + if "args" in kwargs: + kwargs["args"] = [tmpf] + list(kwargs["args"]) else: args = list(args) args[0] = [tmpf] + args[0] @@ -543,36 +627,39 @@ def install_chef_from_omnibus(url=None, retries=None, omnibus_version=None): if omnibus_version is None: args = [] else: - args = ['-v', omnibus_version] + args = ["-v", omnibus_version] content = url_helper.readurl(url=url, retries=retries).contents return subp_blob_in_tempfile( - blob=content, args=args, - basename='chef-omnibus-install', capture=False) + blob=content, args=args, basename="chef-omnibus-install", capture=False + ) def install_chef(cloud, chef_cfg, log): # If chef is not installed, we install chef based on 'install_type' - install_type = util.get_cfg_option_str(chef_cfg, 'install_type', - 'packages') - run = util.get_cfg_option_bool(chef_cfg, 'exec', default=False) + install_type = util.get_cfg_option_str( + chef_cfg, "install_type", "packages" + ) + run = util.get_cfg_option_bool(chef_cfg, "exec", default=False) if install_type == "gems": # This will install and run the chef-client from gems - chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) - ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', - RUBY_VERSION_DEFAULT) + chef_version = util.get_cfg_option_str(chef_cfg, "version", None) + ruby_version = util.get_cfg_option_str( + chef_cfg, "ruby_version", RUBY_VERSION_DEFAULT + ) install_chef_from_gems(ruby_version, chef_version, cloud.distro) # Retain backwards compat, by preferring True instead of False # when not provided/overriden... 
- run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True) - elif install_type == 'packages': + run = util.get_cfg_option_bool(chef_cfg, "exec", default=True) + elif install_type == "packages": # This will install and run the chef-client from packages - cloud.distro.install_packages(('chef',)) - elif install_type == 'omnibus': + cloud.distro.install_packages(("chef",)) + elif install_type == "omnibus": omnibus_version = util.get_cfg_option_str(chef_cfg, "omnibus_version") install_chef_from_omnibus( url=util.get_cfg_option_str(chef_cfg, "omnibus_url"), retries=util.get_cfg_option_int(chef_cfg, "omnibus_url_retries"), - omnibus_version=omnibus_version) + omnibus_version=omnibus_version, + ) else: log.warning("Unknown chef install type '%s'", install_type) run = False @@ -581,25 +668,47 @@ def install_chef(cloud, chef_cfg, log): def get_ruby_packages(version): # return a list of packages needed to install ruby at version - pkgs = ['ruby%s' % version, 'ruby%s-dev' % version] + pkgs = ["ruby%s" % version, "ruby%s-dev" % version] if version == "1.8": - pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8')) + pkgs.extend(("libopenssl-ruby1.8", "rubygems1.8")) return pkgs def install_chef_from_gems(ruby_version, chef_version, distro): distro.install_packages(get_ruby_packages(ruby_version)) - if not os.path.exists('/usr/bin/gem'): - util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') - if not os.path.exists('/usr/bin/ruby'): - util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby') + if not os.path.exists("/usr/bin/gem"): + util.sym_link("/usr/bin/gem%s" % ruby_version, "/usr/bin/gem") + if not os.path.exists("/usr/bin/ruby"): + util.sym_link("/usr/bin/ruby%s" % ruby_version, "/usr/bin/ruby") if chef_version: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '-v %s' % chef_version, '--no-ri', - '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "-v %s" % chef_version, + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) else: - subp.subp(['/usr/bin/gem', 'install', 'chef', - '--no-ri', '--no-rdoc', '--bindir', - '/usr/bin', '-q'], capture=False) + subp.subp( + [ + "/usr/bin/gem", + "install", + "chef", + "--no-ri", + "--no-rdoc", + "--bindir", + "/usr/bin", + "-q", + ], + capture=False, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index 4d5a6aa2..d09fc129 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -30,18 +30,16 @@ location that this cloud-init has been configured with when running. 
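
Concrete data points for get_ruby_packages() above, following directly from its logic:

    assert get_ruby_packages("2.7") == ["ruby2.7", "ruby2.7-dev"]
    # The 1.8 special case appends the legacy openssl/rubygems packages:
    assert get_ruby_packages("1.8") == [
        "ruby1.8", "ruby1.8-dev", "libopenssl-ruby1.8", "rubygems1.8"
    ]
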
import copy from io import StringIO -from cloudinit import type_utils -from cloudinit import util -from cloudinit import safeyaml +from cloudinit import safeyaml, type_utils, util -SKIP_KEYS = frozenset(['log_cfgs']) +SKIP_KEYS = frozenset(["log_cfgs"]) def _make_header(text): header = StringIO() header.write("-" * 80) header.write("\n") - header.write(text.center(80, ' ')) + header.write(text.center(80, " ")) header.write("\n") header.write("-" * 80) header.write("\n") @@ -56,17 +54,16 @@ def _dumps(obj): def handle(name, cfg, cloud, log, args): """Handler method activated by cloud-init.""" - verbose = util.get_cfg_by_path(cfg, ('debug', 'verbose'), default=True) + verbose = util.get_cfg_by_path(cfg, ("debug", "verbose"), default=True) if args: # if args are provided (from cmdline) then explicitly set verbose out_file = args[0] verbose = True else: - out_file = util.get_cfg_by_path(cfg, ('debug', 'output')) + out_file = util.get_cfg_by_path(cfg, ("debug", "output")) if not verbose: - log.debug(("Skipping module named %s," - " verbose printing disabled"), name) + log.debug("Skipping module named %s, verbose printing disabled", name) return # Clean out some keys that we just don't care about showing... dump_cfg = copy.deepcopy(cfg) @@ -85,8 +82,9 @@ def handle(name, cfg, cloud, log, args): to_print.write(_dumps(cloud.datasource.metadata)) to_print.write("\n") to_print.write(_make_header("Misc")) - to_print.write("Datasource: %s\n" % - (type_utils.obj_name(cloud.datasource))) + to_print.write( + "Datasource: %s\n" % (type_utils.obj_name(cloud.datasource)) + ) to_print.write("Distro: %s\n" % (type_utils.obj_name(cloud.distro))) to_print.write("Hostname: %s\n" % (cloud.get_hostname(True))) to_print.write("Instance ID: %s\n" % (cloud.get_instance_id())) @@ -102,4 +100,5 @@ def handle(name, cfg, cloud, log, args): else: util.multi_log("".join(content_to_file), console=True, stderr=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py index 61c769b3..5e528e81 100644 --- a/cloudinit/config/cc_disable_ec2_metadata.py +++ b/cloudinit/config/cc_disable_ec2_metadata.py @@ -26,32 +26,35 @@ by default. 
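
A minimal sketch of the configuration shape that the cc_debug handle() above reads via util.get_cfg_by_path; the output path is hypothetical:

    cfg = {
        "debug": {
            "verbose": True,  # ("debug", "verbose"), defaults to True
            "output": "/tmp/ci-debug.log",  # None means log to the console
        }
    }
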
disable_ec2_metadata: """ -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, util from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject'] -REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254'] +REJECT_CMD_IF = ["route", "add", "-host", "169.254.169.254", "reject"] +REJECT_CMD_IP = ["ip", "route", "add", "prohibit", "169.254.169.254"] def handle(name, cfg, _cloud, log, _args): disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False) if disabled: reject_cmd = None - if subp.which('ip'): + if subp.which("ip"): reject_cmd = REJECT_CMD_IP - elif subp.which('ifconfig'): + elif subp.which("ifconfig"): reject_cmd = REJECT_CMD_IF else: - log.error(('Neither "route" nor "ip" command found, unable to ' - 'manipulate routing table')) + log.error( + 'Neither "route" nor "ip" command found, unable to ' + "manipulate routing table" + ) return subp.subp(reject_cmd, capture=False) else: - log.debug(("Skipping module named %s," - " disabling the ec2 route not enabled"), name) + log.debug( + "Skipping module named %s, disabling the ec2 route not enabled", + name, + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 440f05f1..4d527c7a 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -100,13 +100,13 @@ A label can be specified for the filesystem using replace_fs: """ -from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp import logging import os import shlex +from cloudinit import subp, util +from cloudinit.settings import PER_INSTANCE + frequency = PER_INSTANCE # Define the commands to use @@ -118,7 +118,7 @@ BLKDEV_CMD = subp.which("blockdev") PARTPROBE_CMD = subp.which("partprobe") WIPEFS_CMD = subp.which("wipefs") -LANG_C_ENV = {'LANG': 'C'} +LANG_C_ENV = {"LANG": "C"} LOG = logging.getLogger(__name__) @@ -145,9 +145,12 @@ def handle(_name, cfg, cloud, log, _args): try: log.debug("Creating new partition table/disk") - util.log_time(logfunc=LOG.debug, - msg="Creating partition on %s" % disk, - func=mkpart, args=(disk, definition)) + util.log_time( + logfunc=LOG.debug, + msg="Creating partition on %s" % disk, + func=mkpart, + args=(disk, definition), + ) except Exception as e: util.logexc(LOG, "Failed partitioning operation\n%s" % e) @@ -162,10 +165,13 @@ def handle(_name, cfg, cloud, log, _args): try: log.debug("Creating new filesystem.") - device = definition.get('device') - util.log_time(logfunc=LOG.debug, - msg="Creating fs for %s" % device, - func=mkfs, args=(definition,)) + device = definition.get("device") + util.log_time( + logfunc=LOG.debug, + msg="Creating fs for %s" % device, + func=mkfs, + args=(definition,), + ) except Exception as e: util.logexc(LOG, "Failed during filesystem operation\n%s" % e) @@ -178,16 +184,22 @@ def update_disk_setup_devices(disk_setup, tformer): if transformed is None or transformed == origname: continue if transformed in disk_setup: - LOG.info("Replacing %s in disk_setup for translation of %s", - origname, transformed) + LOG.info( + "Replacing %s in disk_setup for translation of %s", + origname, + transformed, + ) del disk_setup[transformed] disk_setup[transformed] = disk_setup[origname] if isinstance(disk_setup[transformed], dict): - disk_setup[transformed]['_origname'] = origname + disk_setup[transformed]["_origname"] = origname del disk_setup[origname] - LOG.debug("updated 
disk_setup device entry '%s' to '%s'", - origname, transformed) + LOG.debug( + "updated disk_setup device entry '%s' to '%s'", + origname, + transformed, + ) def update_fs_setup_devices(disk_setup, tformer): @@ -198,7 +210,7 @@ def update_fs_setup_devices(disk_setup, tformer): LOG.warning("entry in disk_setup not a dict: %s", definition) continue - origname = definition.get('device') + origname = definition.get("device") if origname is None: continue @@ -208,19 +220,24 @@ def update_fs_setup_devices(disk_setup, tformer): tformed = tformer(dev) if tformed is not None: dev = tformed - LOG.debug("%s is mapped to disk=%s part=%s", - origname, tformed, part) - definition['_origname'] = origname - definition['device'] = tformed + LOG.debug( + "%s is mapped to disk=%s part=%s", origname, tformed, part + ) + definition["_origname"] = origname + definition["device"] = tformed if part: # In origname with .N, N overrides 'partition' key. - if 'partition' in definition: - LOG.warning("Partition '%s' from dotted device name '%s' " - "overrides 'partition' key in %s", part, origname, - definition) - definition['_partition'] = definition['partition'] - definition['partition'] = part + if "partition" in definition: + LOG.warning( + "Partition '%s' from dotted device name '%s' " + "overrides 'partition' key in %s", + part, + origname, + definition, + ) + definition["_partition"] = definition["partition"] + definition["partition"] = part def value_splitter(values, start=None): @@ -232,7 +249,7 @@ def value_splitter(values, start=None): if start: _values = _values[start:] - for key, value in [x.split('=') for x in _values]: + for key, value in [x.split("=") for x in _values]: yield key, value @@ -251,11 +268,16 @@ def enumerate_disk(device, nodeps=False): name: the device name, i.e. 
sda """ - lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL', - device] + lsblk_cmd = [ + LSBLK_CMD, + "--pairs", + "--output", + "NAME,TYPE,FSTYPE,LABEL", + device, + ] if nodeps: - lsblk_cmd.append('--nodeps') + lsblk_cmd.append("--nodeps") info = None try: @@ -269,10 +291,10 @@ def enumerate_disk(device, nodeps=False): for part in parts: d = { - 'name': None, - 'type': None, - 'fstype': None, - 'label': None, + "name": None, + "type": None, + "fstype": None, + "label": None, } for key, value in value_splitter(part): @@ -303,9 +325,9 @@ def is_device_valid(name, partition=False): LOG.warning("Query against device %s failed", name) return False - if partition and d_type == 'part': + if partition and d_type == "part": return True - elif not partition and d_type == 'disk': + elif not partition and d_type == "disk": return True return False @@ -321,7 +343,7 @@ def check_fs(device): """ out, label, fs_type, uuid = None, None, None, None - blkid_cmd = [BLKID_CMD, '-c', '/dev/null', device] + blkid_cmd = [BLKID_CMD, "-c", "/dev/null", device] try: out, _err = subp.subp(blkid_cmd, rcs=[0, 2]) except Exception as e: @@ -332,11 +354,11 @@ def check_fs(device): if out: if len(out.splitlines()) == 1: for key, value in value_splitter(out, start=1): - if key.lower() == 'label': + if key.lower() == "label": label = value - elif key.lower() == 'type': + elif key.lower() == "type": fs_type = value - elif key.lower() == 'uuid': + elif key.lower() == "uuid": uuid = value return label, fs_type, uuid @@ -350,8 +372,14 @@ def is_filesystem(device): return fs_type -def find_device_node(device, fs_type=None, label=None, valid_targets=None, - label_match=True, replace_fs=None): +def find_device_node( + device, + fs_type=None, + label=None, + valid_targets=None, + label_match=True, + replace_fs=None, +): """ Find a device that is either matches the spec, or the first @@ -366,31 +394,32 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None, label = "" if not valid_targets: - valid_targets = ['disk', 'part'] + valid_targets = ["disk", "part"] raw_device_used = False for d in enumerate_disk(device): - if d['fstype'] == replace_fs and label_match is False: + if d["fstype"] == replace_fs and label_match is False: # We found a device where we want to replace the FS - return ('/dev/%s' % d['name'], False) + return ("/dev/%s" % d["name"], False) - if (d['fstype'] == fs_type and - ((label_match and d['label'] == label) or not label_match)): + if d["fstype"] == fs_type and ( + (label_match and d["label"] == label) or not label_match + ): # If we find a matching device, we return that - return ('/dev/%s' % d['name'], True) + return ("/dev/%s" % d["name"], True) - if d['type'] in valid_targets: + if d["type"] in valid_targets: - if d['type'] != 'disk' or d['fstype']: + if d["type"] != "disk" or d["fstype"]: raw_device_used = True - if d['type'] == 'disk': + if d["type"] == "disk": # Skip the raw disk, its the default pass - elif not d['fstype']: - return ('/dev/%s' % d['name'], False) + elif not d["fstype"]: + return ("/dev/%s" % d["name"], False) if not raw_device_used: return (device, False) @@ -433,7 +462,7 @@ def get_dyn_func(*args): if len(args) < 2: raise Exception("Unable to determine dynamic funcation name") - func_name = (args[0] % args[1]) + func_name = args[0] % args[1] func_args = args[2:] try: @@ -448,8 +477,8 @@ def get_dyn_func(*args): def get_hdd_size(device): try: - size_in_bytes, _ = subp.subp([BLKDEV_CMD, '--getsize64', device]) - sector_size, _ = 
subp.subp([BLKDEV_CMD, '--getss', device]) + size_in_bytes, _ = subp.subp([BLKDEV_CMD, "--getsize64", device]) + sector_size, _ = subp.subp([BLKDEV_CMD, "--getss", device]) except Exception as e: raise Exception("Failed to get %s size\n%s" % (device, e)) from e @@ -481,13 +510,13 @@ def check_partition_mbr_layout(device, layout): if device in _line[0]: # We don't understand extended partitions yet - if _line[-1].lower() in ['extended', 'empty']: + if _line[-1].lower() in ["extended", "empty"]: continue # Find the partition types type_label = None for x in sorted(range(1, len(_line)), reverse=True): - if _line[x].isdigit() and _line[x] != '/': + if _line[x].isdigit() and _line[x] != "/": type_label = _line[x] break @@ -496,7 +525,7 @@ def check_partition_mbr_layout(device, layout): def check_partition_gpt_layout(device, layout): - prt_cmd = [SGDISK_CMD, '-p', device] + prt_cmd = [SGDISK_CMD, "-p", device] try: out, _err = subp.subp(prt_cmd, update_env=LANG_C_ENV) except Exception as e: @@ -522,7 +551,7 @@ def check_partition_gpt_layout(device, layout): # Number Start (sector) End (sector) Size Code Name # 1 2048 206847 100.0 MiB 0700 Microsoft basic data for line in out_lines: - if line.strip().startswith('Number'): + if line.strip().startswith("Number"): break codes = [line.strip().split()[5] for line in out_lines] @@ -545,10 +574,16 @@ def check_partition_layout(table_type, device, layout): function called check_partition_%s_layout """ found_layout = get_dyn_func( - "check_partition_%s_layout", table_type, device, layout) - - LOG.debug("called check_partition_%s_layout(%s, %s), returned: %s", - table_type, device, layout, found_layout) + "check_partition_%s_layout", table_type, device, layout + ) + + LOG.debug( + "called check_partition_%s_layout(%s, %s), returned: %s", + table_type, + device, + layout, + found_layout, + ) if isinstance(layout, bool): # if we are using auto partitioning, or "True" be happy # if a single partition exists. @@ -559,10 +594,12 @@ def check_partition_layout(table_type, device, layout): elif len(found_layout) == len(layout): # This just makes sure that the number of requested # partitions and the type labels are right - layout_types = [str(x[1]) if isinstance(x, (tuple, list)) else None - for x in layout] - LOG.debug("Layout types=%s. Found types=%s", - layout_types, found_layout) + layout_types = [ + str(x[1]) if isinstance(x, (tuple, list)) else None for x in layout + ] + LOG.debug( + "Layout types=%s. 
Found types=%s", layout_types, found_layout + ) for itype, ftype in zip(layout_types, found_layout): if itype is not None and str(ftype) != str(itype): return False @@ -588,8 +625,9 @@ def get_partition_mbr_layout(size, layout): # Create a single partition return "0," - if ((len(layout) == 0 and isinstance(layout, list)) or - not isinstance(layout, list)): + if (len(layout) == 0 and isinstance(layout, list)) or not isinstance( + layout, list + ): raise Exception("Partition layout is invalid") last_part_num = len(layout) @@ -617,8 +655,10 @@ def get_partition_mbr_layout(size, layout): sfdisk_definition = "\n".join(part_definition) if len(part_definition) > 4: - raise Exception("Calculated partition definition is too big\n%s" % - sfdisk_definition) + raise Exception( + "Calculated partition definition is too big\n%s" + % sfdisk_definition + ) return sfdisk_definition @@ -632,14 +672,15 @@ def get_partition_gpt_layout(size, layout): if isinstance(partition, list): if len(partition) != 2: raise Exception( - "Partition was incorrectly defined: %s" % partition) + "Partition was incorrectly defined: %s" % partition + ) percent, partition_type = partition else: percent = partition partition_type = None part_size = int(float(size) * (float(percent) / 100)) - partition_specs.append((partition_type, [0, '+{}'.format(part_size)])) + partition_specs.append((partition_type, [0, "+{}".format(part_size)])) # The last partition should use up all remaining space partition_specs[-1][-1][-1] = 0 @@ -649,7 +690,7 @@ def get_partition_gpt_layout(size, layout): def purge_disk_ptable(device): # wipe the first and last megabyte of a disk (or file) # gpt stores partition table both at front and at end. - null = '\0' + null = "\0" start_len = 1024 * 1024 end_len = 1024 * 1024 with open(device, "rb+") as fp: @@ -668,14 +709,14 @@ def purge_disk(device): # wipe any file systems first for d in enumerate_disk(device): - if d['type'] not in ["disk", "crypt"]: - wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']] + if d["type"] not in ["disk", "crypt"]: + wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d["name"]] try: - LOG.info("Purging filesystem on /dev/%s", d['name']) + LOG.info("Purging filesystem on /dev/%s", d["name"]) subp.subp(wipefs_cmd) except Exception as e: raise Exception( - "Failed FS purge of /dev/%s" % d['name'] + "Failed FS purge of /dev/%s" % d["name"] ) from e purge_disk_ptable(device) @@ -701,7 +742,7 @@ def read_parttbl(device): if PARTPROBE_CMD is not None: probe_cmd = [PARTPROBE_CMD, device] else: - probe_cmd = [BLKDEV_CMD, '--rereadpt', device] + probe_cmd = [BLKDEV_CMD, "--rereadpt", device] util.udevadm_settle() try: subp.subp(probe_cmd) @@ -730,17 +771,24 @@ def exec_mkpart_mbr(device, layout): def exec_mkpart_gpt(device, layout): try: - subp.subp([SGDISK_CMD, '-Z', device]) + subp.subp([SGDISK_CMD, "-Z", device]) for index, (partition_type, (start, end)) in enumerate(layout): index += 1 - subp.subp([SGDISK_CMD, - '-n', '{}:{}:{}'.format(index, start, end), device]) + subp.subp( + [ + SGDISK_CMD, + "-n", + "{}:{}:{}".format(index, start, end), + device, + ] + ) if partition_type is not None: # convert to a 4 char (or more) string right padded with 0 # 82 -> 8200. 
'Linux' -> 'Linux' pinput = str(partition_type).ljust(4, "0") subp.subp( - [SGDISK_CMD, '-t', '{}:{}'.format(index, pinput), device]) + [SGDISK_CMD, "-t", "{}:{}".format(index, pinput), device] + ) except Exception: LOG.warning("Failed to partition device %s", device) raise @@ -766,8 +814,10 @@ def assert_and_settle_device(device): if not os.path.exists(device): util.udevadm_settle() if not os.path.exists(device): - raise RuntimeError("Device %s did not exist and was not created " - "with a udevadm settle." % device) + raise RuntimeError( + "Device %s did not exist and was not created " + "with a udevadm settle." % device + ) # Whether or not the device existed above, it is possible that udev # events that would populate udev database (for reading by lsdname) have @@ -794,9 +844,9 @@ def mkpart(device, definition): device = os.path.realpath(device) LOG.debug("Checking values for %s definition", device) - overwrite = definition.get('overwrite', False) - layout = definition.get('layout', False) - table_type = definition.get('table_type', 'mbr') + overwrite = definition.get("overwrite", False) + layout = definition.get("layout", False) + table_type = definition.get("table_type", "mbr") # Check if the default device is a partition or not LOG.debug("Checking against default devices") @@ -809,7 +859,8 @@ def mkpart(device, definition): LOG.debug("Checking if device %s is a valid device", device) if not is_device_valid(device): raise Exception( - 'Device {device} is not a disk device!'.format(device=device)) + "Device {device} is not a disk device!".format(device=device) + ) # Remove the partition table entries if isinstance(layout, str) and layout.lower() == "remove": @@ -845,21 +896,21 @@ def lookup_force_flag(fs): A force flag might be -F or -F, this look it up """ flags = { - 'ext': '-F', - 'btrfs': '-f', - 'xfs': '-f', - 'reiserfs': '-f', - 'swap': '-f', + "ext": "-F", + "btrfs": "-f", + "xfs": "-f", + "reiserfs": "-f", + "swap": "-f", } - if 'ext' in fs.lower(): - fs = 'ext' + if "ext" in fs.lower(): + fs = "ext" if fs.lower() in flags: return flags[fs] LOG.warning("Force flag for %s is unknown.", fs) - return '' + return "" def mkfs(fs_cfg): @@ -883,14 +934,14 @@ def mkfs(fs_cfg): When 'cmd' is provided then no other parameter is required. 
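
Worked examples for lookup_force_flag() above; these follow directly from its flag table and the ext normalization:

    assert lookup_force_flag("ext4") == "-F"  # any "ext*" collapses to "ext"
    assert lookup_force_flag("xfs") == "-f"
    assert lookup_force_flag("vfat") == ""    # unknown: warn, empty flag
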
""" - label = fs_cfg.get('label') - device = fs_cfg.get('device') - partition = str(fs_cfg.get('partition', 'any')) - fs_type = fs_cfg.get('filesystem') - fs_cmd = fs_cfg.get('cmd', []) - fs_opts = fs_cfg.get('extra_opts', []) - fs_replace = fs_cfg.get('replace_fs', False) - overwrite = fs_cfg.get('overwrite', False) + label = fs_cfg.get("label") + device = fs_cfg.get("device") + partition = str(fs_cfg.get("partition", "any")) + fs_type = fs_cfg.get("filesystem") + fs_cmd = fs_cfg.get("cmd", []) + fs_opts = fs_cfg.get("extra_opts", []) + fs_replace = fs_cfg.get("replace_fs", False) + overwrite = fs_cfg.get("overwrite", False) # ensure that we get a real device rather than a symbolic link assert_and_settle_device(device) @@ -903,14 +954,19 @@ def mkfs(fs_cfg): # Handle manual definition of partition if partition.isdigit(): device = "%s%s" % (device, partition) - LOG.debug("Manual request of partition %s for %s", - partition, device) + LOG.debug( + "Manual request of partition %s for %s", partition, device + ) # Check to see if the fs already exists LOG.debug("Checking device %s", device) check_label, check_fstype, _ = check_fs(device) - LOG.debug("Device '%s' has check_label='%s' check_fstype=%s", - device, check_label, check_fstype) + LOG.debug( + "Device '%s' has check_label='%s' check_fstype=%s", + device, + check_label, + check_fstype, + ) if check_label == label and check_fstype == fs_type: LOG.debug("Existing file system found at %s", device) @@ -924,19 +980,23 @@ def mkfs(fs_cfg): else: LOG.debug("Device %s is cleared for formating", device) - elif partition and str(partition).lower() in ('auto', 'any'): + elif partition and str(partition).lower() in ("auto", "any"): # For auto devices, we match if the filesystem does exist odevice = device LOG.debug("Identifying device to create %s filesytem on", label) # any mean pick the first match on the device with matching fs_type label_match = True - if partition.lower() == 'any': + if partition.lower() == "any": label_match = False - device, reuse = find_device_node(device, fs_type=fs_type, label=label, - label_match=label_match, - replace_fs=fs_replace) + device, reuse = find_device_node( + device, + fs_type=fs_type, + label=label, + label_match=label_match, + replace_fs=fs_replace, + ) LOG.debug("Automatic device for %s identified as %s", odevice, device) if reuse: @@ -947,18 +1007,25 @@ def mkfs(fs_cfg): LOG.debug("Replacing file system on %s as instructed.", device) if not device: - LOG.debug("No device aviable that matches request. " - "Skipping fs creation for %s", fs_cfg) + LOG.debug( + "No device aviable that matches request. " + "Skipping fs creation for %s", + fs_cfg, + ) return - elif not partition or str(partition).lower() == 'none': + elif not partition or str(partition).lower() == "none": LOG.debug("Using the raw device to place filesystem %s on", label) else: LOG.debug("Error in device identification handling.") return - LOG.debug("File system type '%s' with label '%s' will be created on %s", - fs_type, label, device) + LOG.debug( + "File system type '%s' with label '%s' will be created on %s", + fs_type, + label, + device, + ) # Make sure the device is defined if not device: @@ -969,26 +1036,29 @@ def mkfs(fs_cfg): if not (fs_type or fs_cmd): raise Exception( "No way to create filesystem '{label}'. 
fs_type or fs_cmd " - "must be set.".format(label=label)) + "must be set.".format(label=label) + ) # Create the commands shell = False if fs_cmd: - fs_cmd = fs_cfg['cmd'] % { - 'label': label, - 'filesystem': fs_type, - 'device': device, + fs_cmd = fs_cfg["cmd"] % { + "label": label, + "filesystem": fs_type, + "device": device, } shell = True if overwrite: LOG.warning( "fs_setup:overwrite ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) if fs_opts: LOG.warning( "fs_setup:extra_opts ignored because cmd was specified: %s", - fs_cmd) + fs_cmd, + ) else: # Find the mkfs command mkfs_cmd = subp.which("mkfs.%s" % fs_type) @@ -996,8 +1066,11 @@ def mkfs(fs_cfg): mkfs_cmd = subp.which("mk%s" % fs_type) if not mkfs_cmd: - LOG.warning("Cannot create fstype '%s'. No mkfs.%s command", - fs_type, fs_type) + LOG.warning( + "Cannot create fstype '%s'. No mkfs.%s command", + fs_type, + fs_type, + ) return fs_cmd = [mkfs_cmd, device] @@ -1022,4 +1095,5 @@ def mkfs(fs_cfg): except Exception as e: raise Exception("Failed to exec of '%s':\n%s" % (fs_cmd, e)) from e + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 40eee052..a928082b 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -24,12 +24,12 @@ user configuration should be required. import os from cloudinit import log as logging -from cloudinit.settings import PER_ALWAYS from cloudinit import subp +from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] LOG = logging.getLogger(__name__) @@ -39,15 +39,18 @@ def is_upstart_system(): return False myenv = os.environ.copy() - if 'UPSTART_SESSION' in myenv: - del myenv['UPSTART_SESSION'] - check_cmd = ['initctl', 'version'] + if "UPSTART_SESSION" in myenv: + del myenv["UPSTART_SESSION"] + check_cmd = ["initctl", "version"] try: (out, _err) = subp.subp(check_cmd, env=myenv) - return 'upstart' in out + return "upstart" in out except subp.ProcessExecutionError as e: - LOG.debug("'%s' returned '%s', not using upstart", - ' '.join(check_cmd), e.exit_code) + LOG.debug( + "'%s' returned '%s', not using upstart", + " ".join(check_cmd), + e.exit_code, + ) return False @@ -56,7 +59,7 @@ def handle(name, _cfg, cloud, log, args): if not event_names: # Default to the 'cloud-config' # event for backwards compat. - event_names = ['cloud-config'] + event_names = ["cloud-config"] if not is_upstart_system(): log.debug("not upstart system, '%s' disabled", name) @@ -64,11 +67,12 @@ def handle(name, _cfg, cloud, log, args): cfgpath = cloud.paths.get_ipath_cur("cloud_config") for n in event_names: - cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath] + cmd = ["initctl", "emit", str(n), "CLOUD_CFG=%s" % cfgpath] try: subp.subp(cmd) except Exception as e: # TODO(harlowja), use log exception from utils?? 
log.warning("Emission of upstart event %s failed due to: %s", n, e) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py index 91f50e22..50a81744 100644 --- a/cloudinit/config/cc_fan.py +++ b/cloudinit/config/cc_fan.py @@ -38,60 +38,62 @@ If cloud-init sees a ``fan`` entry in cloud-config it will: """ from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util LOG = logging.getLogger(__name__) frequency = PER_INSTANCE BUILTIN_CFG = { - 'config': None, - 'config_path': '/etc/network/fan', + "config": None, + "config_path": "/etc/network/fan", } def stop_update_start(distro, service, config_file, content): try: - distro.manage_service('stop', service) + distro.manage_service("stop", service) stop_failed = False except subp.ProcessExecutionError as e: stop_failed = True LOG.warning("failed to stop %s: %s", service, e) - if not content.endswith('\n'): - content += '\n' + if not content.endswith("\n"): + content += "\n" util.write_file(config_file, content, omode="w") try: - distro.manage_service('start', service) + distro.manage_service("start", service) if stop_failed: LOG.warning("success: %s started", service) except subp.ProcessExecutionError as e: LOG.warning("failed to start %s: %s", service, e) - distro.manage_service('enable', service) + distro.manage_service("enable", service) def handle(name, cfg, cloud, log, args): - cfgin = cfg.get('fan') + cfgin = cfg.get("fan") if not cfgin: cfgin = {} mycfg = util.mergemanydict([cfgin, BUILTIN_CFG]) - if not mycfg.get('config'): + if not mycfg.get("config"): LOG.debug("%s: no 'fan' config entry. disabling", name) return - util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w") + util.write_file(mycfg.get("config_path"), mycfg.get("config"), omode="w") distro = cloud.distro - if not subp.which('fanctl'): - distro.install_packages(['ubuntu-fan']) + if not subp.which("fanctl"): + distro.install_packages(["ubuntu-fan"]) stop_update_start( distro, - service='ubuntu-fan', config_file=mycfg.get('config_path'), - content=mycfg.get('config')) + service="ubuntu-fan", + config_file=mycfg.get("config_path"), + content=mycfg.get("config"), + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index 4fa5297e..f443ccd8 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -31,10 +31,7 @@ specified as a jinja template with the following variables set: """ -from cloudinit import templater -from cloudinit import util -from cloudinit import version - +from cloudinit import templater, util, version from cloudinit.settings import PER_ALWAYS frequency = PER_ALWAYS @@ -49,7 +46,7 @@ FINAL_MESSAGE_DEF = ( def handle(_name, cfg, cloud, log, args): - msg_in = '' + msg_in = "" if len(args) != 0: msg_in = str(args[0]) else: @@ -64,14 +61,18 @@ def handle(_name, cfg, cloud, log, args): cver = version.version_string() try: subs = { - 'uptime': uptime, - 'timestamp': ts, - 'version': cver, - 'datasource': str(cloud.datasource), + "uptime": uptime, + "timestamp": ts, + "version": cver, + "datasource": str(cloud.datasource), } subs.update(dict([(k.upper(), v) for k, v in subs.items()])) - util.multi_log("%s\n" % (templater.render_string(msg_in, subs)), - console=False, stderr=True, log=log) + util.multi_log( + "%s\n" % (templater.render_string(msg_in, subs)), + console=False, + stderr=True, + log=log, + ) 
except Exception: util.logexc(log, "Failed to render final message template") @@ -85,4 +86,5 @@ def handle(_name, cfg, cloud, log, args): if cloud.datasource.is_disconnected: log.warning("Used fallback datasource") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py index 924b967c..3c307153 100644 --- a/cloudinit/config/cc_foo.py +++ b/cloudinit/config/cc_foo.py @@ -53,4 +53,5 @@ frequency = PER_INSTANCE def handle(name, _cfg, _cloud, log, _args): log.debug("Hi from module %s", name) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 1ddc9dc7..43334caa 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -70,17 +70,15 @@ import re import stat from cloudinit import log as logging +from cloudinit import subp, temp_utils, util from cloudinit.settings import PER_ALWAYS -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util frequency = PER_ALWAYS DEFAULT_CONFIG = { - 'mode': 'auto', - 'devices': ['/'], - 'ignore_growroot_disabled': False, + "mode": "auto", + "devices": ["/"], + "ignore_growroot_disabled": False, } @@ -131,7 +129,7 @@ class ResizeFailedException(Exception): class ResizeGrowPart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (out, _err) = subp.subp(["growpart", "--help"], env=myenv) @@ -144,7 +142,7 @@ class ResizeGrowPart(object): def resize(self, diskdev, partnum, partdev): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" before = get_size(partdev) # growpart uses tmp dir to store intermediate states @@ -153,14 +151,19 @@ class ResizeGrowPart(object): growpart_tmp = os.path.join(tmpd, "growpart") if not os.path.exists(growpart_tmp): os.mkdir(growpart_tmp, 0o700) - myenv['TMPDIR'] = growpart_tmp + myenv["TMPDIR"] = growpart_tmp try: - subp.subp(["growpart", '--dry-run', diskdev, partnum], - env=myenv) + subp.subp( + ["growpart", "--dry-run", diskdev, partnum], env=myenv + ) except subp.ProcessExecutionError as e: if e.exit_code != 1: - util.logexc(LOG, "Failed growpart --dry-run for (%s, %s)", - diskdev, partnum) + util.logexc( + LOG, + "Failed growpart --dry-run for (%s, %s)", + diskdev, + partnum, + ) raise ResizeFailedException(e) from e return (before, before) @@ -176,7 +179,7 @@ class ResizeGrowPart(object): class ResizeGpart(object): def available(self): myenv = os.environ.copy() - myenv['LANG'] = 'C' + myenv["LANG"] = "C" try: (_out, err) = subp.subp(["gpart", "help"], env=myenv, rcs=[0, 1]) @@ -234,11 +237,11 @@ def device_part_info(devpath): # the device, like /dev/vtbd0p2. if util.is_FreeBSD(): freebsd_part = "/dev/" + util.find_freebsd_part(devpath) - m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) + m = re.search("^(/dev/.+)p([0-9])$", freebsd_part) return (m.group(1), m.group(2)) elif util.is_DragonFlyBSD(): dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath) - m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part) + m = re.search("^(/dev/.+)s([0-9])$", dragonflybsd_part) return (m.group(1), m.group(2)) if not os.path.exists(syspath): @@ -275,7 +278,7 @@ def devent2dev(devent): container = util.is_container() # Ensure the path is a block device. 
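
A concrete instance of the BSD regex in device_part_info() above; the device name is hypothetical:

    import re

    m = re.search("^(/dev/.+)p([0-9])$", "/dev/vtbd0p2")
    # FreeBSD naming: disk device plus partition number
    assert (m.group(1), m.group(2)) == ("/dev/vtbd0", "2")
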
- if (dev == "/dev/root" and not container): + if dev == "/dev/root" and not container: dev = util.rootdev_from_cmdline(util.get_cmdline()) if dev is None: if os.path.exists(dev): @@ -293,65 +296,102 @@ def resize_devices(resizer, devices): try: blockdev = devent2dev(devent) except ValueError as e: - info.append((devent, RESIZE.SKIPPED, - "unable to convert to device: %s" % e,)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "unable to convert to device: %s" % e, + ) + ) continue try: statret = os.stat(blockdev) except OSError as e: - info.append((devent, RESIZE.SKIPPED, - "stat of '%s' failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "stat of '%s' failed: %s" % (blockdev, e), + ) + ) continue - if (not stat.S_ISBLK(statret.st_mode) and - not stat.S_ISCHR(statret.st_mode)): - info.append((devent, RESIZE.SKIPPED, - "device '%s' not a block device" % blockdev,)) + if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR( + statret.st_mode + ): + info.append( + ( + devent, + RESIZE.SKIPPED, + "device '%s' not a block device" % blockdev, + ) + ) continue try: (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: - info.append((devent, RESIZE.SKIPPED, - "device_part_info(%s) failed: %s" % (blockdev, e),)) + info.append( + ( + devent, + RESIZE.SKIPPED, + "device_part_info(%s) failed: %s" % (blockdev, e), + ) + ) continue try: (old, new) = resizer.resize(disk, ptnum, blockdev) if old == new: - info.append((devent, RESIZE.NOCHANGE, - "no change necessary (%s, %s)" % (disk, ptnum),)) + info.append( + ( + devent, + RESIZE.NOCHANGE, + "no change necessary (%s, %s)" % (disk, ptnum), + ) + ) else: - info.append((devent, RESIZE.CHANGED, - "changed (%s, %s) from %s to %s" % - (disk, ptnum, old, new),)) + info.append( + ( + devent, + RESIZE.CHANGED, + "changed (%s, %s) from %s to %s" + % (disk, ptnum, old, new), + ) + ) except ResizeFailedException as e: - info.append((devent, RESIZE.FAILED, - "failed to resize: disk=%s, ptnum=%s: %s" % - (disk, ptnum, e),)) + info.append( + ( + devent, + RESIZE.FAILED, + "failed to resize: disk=%s, ptnum=%s: %s" + % (disk, ptnum, e), + ) + ) return info def handle(_name, cfg, _cloud, log, _args): - if 'growpart' not in cfg: - log.debug("No 'growpart' entry in cfg. Using default: %s" % - DEFAULT_CONFIG) - cfg['growpart'] = DEFAULT_CONFIG + if "growpart" not in cfg: + log.debug( + "No 'growpart' entry in cfg. 
Using default: %s" % DEFAULT_CONFIG + ) + cfg["growpart"] = DEFAULT_CONFIG - mycfg = cfg.get('growpart') + mycfg = cfg.get("growpart") if not isinstance(mycfg, dict): log.warning("'growpart' in config was not a dict") return - mode = mycfg.get('mode', "auto") + mode = mycfg.get("mode", "auto") if util.is_false(mode): log.debug("growpart disabled: mode=%s" % mode) return - if util.is_false(mycfg.get('ignore_growroot_disabled', False)): + if util.is_false(mycfg.get("ignore_growroot_disabled", False)): if os.path.isfile("/etc/growroot-disabled"): log.debug("growpart disabled: /etc/growroot-disabled exists") log.debug("use ignore_growroot_disabled to ignore") @@ -370,8 +410,12 @@ def handle(_name, cfg, _cloud, log, _args): raise e return - resized = util.log_time(logfunc=log.debug, msg="resize_devices", - func=resize_devices, args=(resizer, devices)) + resized = util.log_time( + logfunc=log.debug, + msg="resize_devices", + func=resize_devices, + args=(resizer, devices), + ) for (entry, action, msg) in resized: if action == RESIZE.CHANGED: log.info("'%s' resized: %s" % (entry, msg)) @@ -379,6 +423,6 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("'%s' %s: %s" % (entry, action, msg)) -RESIZERS = (('growpart', ResizeGrowPart), ('gpart', ResizeGpart)) +RESIZERS = (("growpart", ResizeGrowPart), ("gpart", ResizeGpart)) # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index eb03c664..ad7243d9 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -43,11 +43,10 @@ seeded with empty values, and install_devices_empty is set to true. import os -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util from cloudinit.subp import ProcessExecutionError -distros = ['ubuntu', 'debian'] +distros = ["ubuntu", "debian"] def fetch_idevs(log): @@ -60,8 +59,9 @@ def fetch_idevs(log): try: # get the root disk where the /boot directory resides. 
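
For reference, omitting the growpart key entirely behaves like this explicit config, copied from DEFAULT_CONFIG; mode "auto" selects the first available resizer per the RESIZERS ordering (growpart, then gpart):

    cfg = {
        "growpart": {
            "mode": "auto",
            "devices": ["/"],  # resize the partition backing "/"
            "ignore_growroot_disabled": False,
        }
    }
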
- disk = subp.subp(['grub-probe', '-t', 'disk', '/boot'], - capture=True)[0].strip() + disk = subp.subp(["grub-probe", "-t", "disk", "/boot"], capture=True)[ + 0 + ].strip() except ProcessExecutionError as e: # grub-common may not be installed, especially on containers # FileNotFoundError is a nested exception of ProcessExecutionError @@ -81,26 +81,30 @@ def fetch_idevs(log): if not disk or not os.path.exists(disk): # If we failed to detect a disk, we can return early - return '' + return "" try: # check if disk exists and use udevadm to fetch symlinks - devices = subp.subp( - ['udevadm', 'info', '--root', '--query=symlink', disk], - capture=True - )[0].strip().split() + devices = ( + subp.subp( + ["udevadm", "info", "--root", "--query=symlink", disk], + capture=True, + )[0] + .strip() + .split() + ) except Exception: util.logexc( log, "udevadm DEVLINKS symlink query failed for disk='%s'", disk ) - log.debug('considering these device symlinks: %s', ','.join(devices)) + log.debug("considering these device symlinks: %s", ",".join(devices)) # filter symlinks for /dev/disk/by-id entries - devices = [dev for dev in devices if 'disk/by-id' in dev] - log.debug('filtered to these disk/by-id symlinks: %s', ','.join(devices)) + devices = [dev for dev in devices if "disk/by-id" in dev] + log.debug("filtered to these disk/by-id symlinks: %s", ",".join(devices)) # select first device if there is one, else fall back to plain name idevs = sorted(devices)[0] if devices else disk - log.debug('selected %s', idevs) + log.debug("selected %s", idevs) return idevs @@ -111,14 +115,15 @@ def handle(name, cfg, _cloud, log, _args): if not mycfg: mycfg = {} - enabled = mycfg.get('enabled', True) + enabled = mycfg.get("enabled", True) if util.is_false(enabled): log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled) return idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None) idevs_empty = util.get_cfg_option_str( - mycfg, "grub-pc/install_devices_empty", None) + mycfg, "grub-pc/install_devices_empty", None + ) if idevs is None: idevs = fetch_idevs(log) @@ -128,16 +133,21 @@ def handle(name, cfg, _cloud, log, _args): # now idevs and idevs_empty are set to determined values # or, those set by user - dconf_sel = (("grub-pc grub-pc/install_devices string %s\n" - "grub-pc grub-pc/install_devices_empty boolean %s\n") % - (idevs, idevs_empty)) + dconf_sel = ( + "grub-pc grub-pc/install_devices string %s\n" + "grub-pc grub-pc/install_devices_empty boolean %s\n" + % (idevs, idevs_empty) + ) - log.debug("Setting grub debconf-set-selections with '%s','%s'" % - (idevs, idevs_empty)) + log.debug( + "Setting grub debconf-set-selections with '%s','%s'" + % (idevs, idevs_empty) + ) try: - subp.subp(['debconf-set-selections'], dconf_sel) + subp.subp(["debconf-set-selections"], dconf_sel) except Exception: util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_install_hotplug.py b/cloudinit/config/cc_install_hotplug.py index 9b4075cc..952d9f13 100644 --- a/cloudinit/config/cc_install_hotplug.py +++ b/cloudinit/config/cc_install_hotplug.py @@ -3,15 +3,12 @@ import os from textwrap import dedent -from cloudinit import util -from cloudinit import subp -from cloudinit import stages +from cloudinit import stages, subp, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.distros import ALL_DISTROS -from cloudinit.event import EventType, EventScope +from cloudinit.event import EventScope, 
EventType from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE distros = [ALL_DISTROS] @@ -19,7 +16,8 @@ meta = { "id": "cc_install_hotplug", "name": "Install Hotplug", "title": "Install hotplug if supported and enabled", - "description": dedent("""\ + "description": dedent( + """\ This module will install the udev rules to enable hotplug if supported by the datasource and enabled in the userdata. The udev rules will be installed as @@ -32,21 +30,26 @@ meta = { network configuration. Currently supported datasources: Openstack, EC2 - """), + """ + ), "distros": distros, "examples": [ - dedent("""\ + dedent( + """\ # Enable hotplug of network devices updates: network: when: ["hotplug"] - """), - dedent("""\ + """ + ), + dedent( + """\ # Enable network hotplug alongside boot event updates: network: when: ["boot", "hotplug"] - """), + """ + ), ], "frequency": frequency, } @@ -74,14 +77,14 @@ schema = { "boot-legacy", "boot", "hotplug", - ] - } + ], + }, } - } + }, } - } + }, } - } + }, } __doc__ = get_meta_doc(meta, schema) @@ -100,14 +103,15 @@ LABEL="cloudinit_end" def handle(_name, cfg, cloud, log, _args): validate_cloudconfig_schema(cfg, schema) network_hotplug_enabled = ( - 'updates' in cfg and - 'network' in cfg['updates'] and - 'when' in cfg['updates']['network'] and - 'hotplug' in cfg['updates']['network']['when'] + "updates" in cfg + and "network" in cfg["updates"] + and "when" in cfg["updates"]["network"] + and "hotplug" in cfg["updates"]["network"]["when"] ) hotplug_supported = EventType.HOTPLUG in ( - cloud.datasource.get_supported_events( - [EventType.HOTPLUG]).get(EventScope.NETWORK, set()) + cloud.datasource.get_supported_events([EventType.HOTPLUG]).get( + EventScope.NETWORK, set() + ) ) hotplug_enabled = stages.update_event_enabled( datasource=cloud.datasource, diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index d72b5244..ab35e136 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -38,49 +38,53 @@ host keys are not written to console. 
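Worth noting from the cc_install_hotplug hunk above: the `network_hotplug_enabled` expression is a chained membership test, the usual way to probe a nested config dict without risking a KeyError partway down. A tiny sketch with an illustrative config:

    cfg = {"updates": {"network": {"when": ["boot", "hotplug"]}}}

    network_hotplug_enabled = (
        "updates" in cfg
        and "network" in cfg["updates"]
        and "when" in cfg["updates"]["network"]
        and "hotplug" in cfg["updates"]["network"]["when"]
    )
    assert network_hotplug_enabled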
import os +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE # This is a tool that cloud init provides -HELPER_TOOL_TPL = '%s/cloud-init/write-ssh-key-fingerprints' +HELPER_TOOL_TPL = "%s/cloud-init/write-ssh-key-fingerprints" def _get_helper_tool_path(distro): try: base_lib = distro.usr_lib_exec except AttributeError: - base_lib = '/usr/lib' + base_lib = "/usr/lib" return HELPER_TOOL_TPL % base_lib def handle(name, cfg, cloud, log, _args): if util.is_false(cfg.get("ssh", {}).get("emit_keys_to_console", True)): - log.debug(("Skipping module named %s, " - "logging of SSH host keys disabled"), name) + log.debug( + "Skipping module named %s, logging of SSH host keys disabled", name + ) return helper_path = _get_helper_tool_path(cloud.distro) if not os.path.exists(helper_path): - log.warning(("Unable to activate module %s," - " helper tool not found at %s"), name, helper_path) + log.warning( + "Unable to activate module %s, helper tool not found at %s", + name, + helper_path, + ) return - fp_blacklist = util.get_cfg_option_list(cfg, - "ssh_fp_console_blacklist", []) - key_blacklist = util.get_cfg_option_list(cfg, - "ssh_key_console_blacklist", - ["ssh-dss"]) + fp_blacklist = util.get_cfg_option_list( + cfg, "ssh_fp_console_blacklist", [] + ) + key_blacklist = util.get_cfg_option_list( + cfg, "ssh_key_console_blacklist", ["ssh-dss"] + ) try: - cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)] + cmd = [helper_path, ",".join(fp_blacklist), ",".join(key_blacklist)] (stdout, _stderr) = subp.subp(cmd) - util.multi_log("%s\n" % (stdout.strip()), - stderr=False, console=True) + util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) except Exception: log.warning("Writing keys to the system console failed!") raise + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 299c4d01..03ebf411 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -60,10 +60,7 @@ from io import BytesIO from configobj import ConfigObj -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util - +from cloudinit import subp, type_utils, util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE @@ -71,15 +68,15 @@ frequency = PER_INSTANCE LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf" LS_DEFAULT_FILE = "/etc/default/landscape-client" -distros = ['ubuntu'] +distros = ["ubuntu"] # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 LSC_BUILTIN_CFG = { - 'client': { - 'log_level': "info", - 'url': "https://landscape.canonical.com/message-system", - 'ping_url': "http://landscape.canonical.com/ping", - 'data_path': "/var/lib/landscape/client", + "client": { + "log_level": "info", + "url": "https://landscape.canonical.com/message-system", + "ping_url": "http://landscape.canonical.com/ping", + "data_path": "/var/lib/landscape/client", } } @@ -97,11 +94,13 @@ def handle(_name, cfg, cloud, log, _args): raise RuntimeError( "'landscape' key existed in config, but not a dictionary type," " is a {_type} instead".format( - _type=type_utils.obj_name(ls_cloudcfg))) + _type=type_utils.obj_name(ls_cloudcfg) + ) + ) if not ls_cloudcfg: return - cloud.distro.install_packages(('landscape-client',)) + cloud.distro.install_packages(("landscape-client",)) merge_data = [ LSC_BUILTIN_CFG, @@ -135,4 +134,5 @@ def merge_together(objs): 
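The cc_landscape hunk that follows closes out `merge_together`, which folds several ConfigObj-compatible inputs into one, later entries winning on conflict. A self-contained sketch consistent with the tail shown below, assuming the third-party configobj package is installed:

    from configobj import ConfigObj

    def merge_together(objs):
        # Merge ConfigObj-compatible inputs; later entries win on conflict.
        cfg = ConfigObj()
        for obj in objs:
            if not obj:
                continue
            if isinstance(obj, ConfigObj):
                cfg.merge(obj)
            else:
                cfg.merge(ConfigObj(obj))
        return cfg

    merged = merge_together([
        {"client": {"log_level": "info"}},
        {"client": {"log_level": "debug"}},
    ])
    assert merged["client"]["log_level"] == "debug"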
cfg.merge(ConfigObj(obj)) return cfg + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py index 7fed9abd..487f58f7 100644 --- a/cloudinit/config/cc_locale.py +++ b/cloudinit/config/cc_locale.py @@ -14,45 +14,48 @@ from cloudinit import util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE - frequency = PER_INSTANCE -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_locale', - 'name': 'Locale', - 'title': 'Set system locale', - 'description': dedent( + "id": "cc_locale", + "name": "Locale", + "title": "Set system locale", + "description": dedent( """\ Configure the system locale and apply it system wide. By default use the locale specified by the datasource.""" ), - 'distros': distros, - 'examples': [ - dedent("""\ + "distros": distros, + "examples": [ + dedent( + """\ # Set the locale to ar_AE locale: ar_AE - """), - dedent("""\ + """ + ), + dedent( + """\ # Set the locale to fr_CA in /etc/alternate_path/locale locale: fr_CA locale_configfile: /etc/alternate_path/locale - """), + """ + ), ], - 'frequency': frequency, + "frequency": frequency, } schema = { - 'type': 'object', - 'properties': { - 'locale': { - 'type': 'string', - 'description': ( + "type": "object", + "properties": { + "locale": { + "type": "string", + "description": ( "The locale to set as the system's locale (e.g. ar_PS)" ), }, - 'locale_configfile': { - 'type': 'string', - 'description': ( + "locale_configfile": { + "type": "string", + "description": ( "The file in which to write the locale configuration (defaults" " to the distro's default location)" ), @@ -70,8 +73,9 @@ def handle(name, cfg, cloud, log, args): locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale()) if util.is_false(locale): - log.debug("Skipping module named %s, disabled by config: %s", - name, locale) + log.debug( + "Skipping module named %s, disabled by config: %s", name, locale + ) return validate_cloudconfig_schema(cfg, schema) @@ -80,4 +84,5 @@ def handle(name, cfg, cloud, log, args): locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile") cloud.distro.apply_locale(locale, locale_cfgfile) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 486037d9..13ddcbe9 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -47,12 +47,12 @@ lxd-bridge will be configured accordingly. domain: """ -from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util import os -distros = ['ubuntu'] +from cloudinit import log as logging +from cloudinit import subp, util + +distros = ["ubuntu"] LOG = logging.getLogger(__name__) @@ -61,36 +61,42 @@ _DEFAULT_NETWORK_NAME = "lxdbr0" def handle(name, cfg, cloud, log, args): # Get config - lxd_cfg = cfg.get('lxd') + lxd_cfg = cfg.get("lxd") if not lxd_cfg: - log.debug("Skipping module named %s, not present or disabled by cfg", - name) + log.debug( + "Skipping module named %s, not present or disabled by cfg", name + ) return if not isinstance(lxd_cfg, dict): - log.warning("lxd config must be a dictionary. found a '%s'", - type(lxd_cfg)) + log.warning( + "lxd config must be a dictionary. found a '%s'", type(lxd_cfg) + ) return # Grab the configuration - init_cfg = lxd_cfg.get('init') + init_cfg = lxd_cfg.get("init") if not isinstance(init_cfg, dict): - log.warning("lxd/init config must be a dictionary. 
found a '%s'", - type(init_cfg)) + log.warning( + "lxd/init config must be a dictionary. found a '%s'", + type(init_cfg), + ) init_cfg = {} - bridge_cfg = lxd_cfg.get('bridge', {}) + bridge_cfg = lxd_cfg.get("bridge", {}) if not isinstance(bridge_cfg, dict): - log.warning("lxd/bridge config must be a dictionary. found a '%s'", - type(bridge_cfg)) + log.warning( + "lxd/bridge config must be a dictionary. found a '%s'", + type(bridge_cfg), + ) bridge_cfg = {} # Install the needed packages packages = [] if not subp.which("lxd"): - packages.append('lxd') + packages.append("lxd") - if init_cfg.get("storage_backend") == "zfs" and not subp.which('zfs'): - packages.append('zfsutils-linux') + if init_cfg.get("storage_backend") == "zfs" and not subp.which("zfs"): + packages.append("zfsutils-linux") if len(packages): try: @@ -102,23 +108,30 @@ def handle(name, cfg, cloud, log, args): # Set up lxd if init config is given if init_cfg: init_keys = ( - 'network_address', 'network_port', 'storage_backend', - 'storage_create_device', 'storage_create_loop', - 'storage_pool', 'trust_password') - subp.subp(['lxd', 'waitready', '--timeout=300']) - cmd = ['lxd', 'init', '--auto'] + "network_address", + "network_port", + "storage_backend", + "storage_create_device", + "storage_create_loop", + "storage_pool", + "trust_password", + ) + subp.subp(["lxd", "waitready", "--timeout=300"]) + cmd = ["lxd", "init", "--auto"] for k in init_keys: if init_cfg.get(k): - cmd.extend(["--%s=%s" % - (k.replace('_', '-'), str(init_cfg[k]))]) + cmd.extend( + ["--%s=%s" % (k.replace("_", "-"), str(init_cfg[k]))] + ) subp.subp(cmd) # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" if bridge_cfg: net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) - if os.path.exists("/etc/default/lxd-bridge") \ - and subp.which(dconf_comm): + if os.path.exists("/etc/default/lxd-bridge") and subp.which( + dconf_comm + ): # Bridge configured through packaging debconf = bridge_to_debconf(bridge_cfg) @@ -126,39 +139,47 @@ def handle(name, cfg, cloud, log, args): # Update debconf database try: log.debug("Setting lxd debconf via " + dconf_comm) - data = "\n".join(["set %s %s" % (k, v) - for k, v in debconf.items()]) + "\n" - subp.subp(['debconf-communicate'], data) + data = ( + "\n".join( + ["set %s %s" % (k, v) for k, v in debconf.items()] + ) + + "\n" + ) + subp.subp(["debconf-communicate"], data) except Exception: - util.logexc(log, "Failed to run '%s' for lxd with" % - dconf_comm) + util.logexc( + log, "Failed to run '%s' for lxd with" % dconf_comm + ) # Remove the existing configuration file (forces re-generation) util.del_file("/etc/default/lxd-bridge") # Run reconfigure log.debug("Running dpkg-reconfigure for lxd") - subp.subp(['dpkg-reconfigure', 'lxd', - '--frontend=noninteractive']) + subp.subp(["dpkg-reconfigure", "lxd", "--frontend=noninteractive"]) else: # Built-in LXD bridge support cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) maybe_cleanup_default( - net_name=net_name, did_init=bool(init_cfg), - create=bool(cmd_create), attach=bool(cmd_attach)) + net_name=net_name, + did_init=bool(init_cfg), + create=bool(cmd_create), + attach=bool(cmd_attach), + ) if cmd_create: - log.debug("Creating lxd bridge: %s" % - " ".join(cmd_create)) + log.debug("Creating lxd bridge: %s" % " ".join(cmd_create)) _lxc(cmd_create) if cmd_attach: - log.debug("Setting up default lxd bridge: %s" % - " ".join(cmd_attach)) + log.debug( + "Setting up default lxd bridge: %s" % " ".join(cmd_attach) + ) _lxc(cmd_attach) elif bridge_cfg: 
raise RuntimeError( - "Unable to configure lxd bridge without %s." + dconf_comm) + "Unable to configure lxd bridge without %s." + dconf_comm + ) def bridge_to_debconf(bridge_cfg): @@ -180,33 +201,32 @@ def bridge_to_debconf(bridge_cfg): if bridge_cfg.get("ipv4_address"): debconf["lxd/bridge-ipv4"] = "true" - debconf["lxd/bridge-ipv4-address"] = \ - bridge_cfg.get("ipv4_address") - debconf["lxd/bridge-ipv4-netmask"] = \ - bridge_cfg.get("ipv4_netmask") - debconf["lxd/bridge-ipv4-dhcp-first"] = \ - bridge_cfg.get("ipv4_dhcp_first") - debconf["lxd/bridge-ipv4-dhcp-last"] = \ - bridge_cfg.get("ipv4_dhcp_last") - debconf["lxd/bridge-ipv4-dhcp-leases"] = \ - bridge_cfg.get("ipv4_dhcp_leases") - debconf["lxd/bridge-ipv4-nat"] = \ - bridge_cfg.get("ipv4_nat", "true") + debconf["lxd/bridge-ipv4-address"] = bridge_cfg.get("ipv4_address") + debconf["lxd/bridge-ipv4-netmask"] = bridge_cfg.get("ipv4_netmask") + debconf["lxd/bridge-ipv4-dhcp-first"] = bridge_cfg.get( + "ipv4_dhcp_first" + ) + debconf["lxd/bridge-ipv4-dhcp-last"] = bridge_cfg.get( + "ipv4_dhcp_last" + ) + debconf["lxd/bridge-ipv4-dhcp-leases"] = bridge_cfg.get( + "ipv4_dhcp_leases" + ) + debconf["lxd/bridge-ipv4-nat"] = bridge_cfg.get("ipv4_nat", "true") if bridge_cfg.get("ipv6_address"): debconf["lxd/bridge-ipv6"] = "true" - debconf["lxd/bridge-ipv6-address"] = \ - bridge_cfg.get("ipv6_address") - debconf["lxd/bridge-ipv6-netmask"] = \ - bridge_cfg.get("ipv6_netmask") - debconf["lxd/bridge-ipv6-nat"] = \ - bridge_cfg.get("ipv6_nat", "false") + debconf["lxd/bridge-ipv6-address"] = bridge_cfg.get("ipv6_address") + debconf["lxd/bridge-ipv6-netmask"] = bridge_cfg.get("ipv6_netmask") + debconf["lxd/bridge-ipv6-nat"] = bridge_cfg.get( + "ipv6_nat", "false" + ) if bridge_cfg.get("domain"): debconf["lxd/bridge-domain"] = bridge_cfg.get("domain") else: - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) return debconf @@ -217,37 +237,41 @@ def bridge_to_cmd(bridge_cfg): bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME) cmd_create = [] - cmd_attach = ["network", "attach-profile", bridge_name, - "default", "eth0"] + cmd_attach = ["network", "attach-profile", bridge_name, "default", "eth0"] if bridge_cfg.get("mode") == "existing": return None, cmd_attach if bridge_cfg.get("mode") != "new": - raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + raise Exception('invalid bridge mode "%s"' % bridge_cfg.get("mode")) cmd_create = ["network", "create", bridge_name] if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): - cmd_create.append("ipv4.address=%s/%s" % - (bridge_cfg.get("ipv4_address"), - bridge_cfg.get("ipv4_netmask"))) + cmd_create.append( + "ipv4.address=%s/%s" + % (bridge_cfg.get("ipv4_address"), bridge_cfg.get("ipv4_netmask")) + ) if bridge_cfg.get("ipv4_nat", "true") == "true": cmd_create.append("ipv4.nat=true") - if bridge_cfg.get("ipv4_dhcp_first") and \ - bridge_cfg.get("ipv4_dhcp_last"): - dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"), - bridge_cfg.get("ipv4_dhcp_last")) + if bridge_cfg.get("ipv4_dhcp_first") and bridge_cfg.get( + "ipv4_dhcp_last" + ): + dhcp_range = "%s-%s" % ( + bridge_cfg.get("ipv4_dhcp_first"), + bridge_cfg.get("ipv4_dhcp_last"), + ) cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range) else: cmd_create.append("ipv4.address=none") if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"): - cmd_create.append("ipv6.address=%s/%s" % - 
(bridge_cfg.get("ipv6_address"), - bridge_cfg.get("ipv6_netmask"))) + cmd_create.append( + "ipv6.address=%s/%s" + % (bridge_cfg.get("ipv6_address"), bridge_cfg.get("ipv6_netmask")) + ) if bridge_cfg.get("ipv6_nat", "false") == "true": cmd_create.append("ipv6.nat=true") @@ -262,14 +286,17 @@ def bridge_to_cmd(bridge_cfg): def _lxc(cmd): - env = {'LC_ALL': 'C', - 'HOME': os.environ.get('HOME', '/root'), - 'USER': os.environ.get('USER', 'root')} - subp.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env) + env = { + "LC_ALL": "C", + "HOME": os.environ.get("HOME", "/root"), + "USER": os.environ.get("USER", "root"), + } + subp.subp(["lxc"] + list(cmd) + ["--force-local"], update_env=env) -def maybe_cleanup_default(net_name, did_init, create, attach, - profile="default", nic_name="eth0"): +def maybe_cleanup_default( + net_name, did_init, create, attach, profile="default", nic_name="eth0" +): """Newer versions of lxc (3.0.1+) create a lxdbr0 network when 'lxd init --auto' is run. Older versions did not. @@ -306,4 +333,5 @@ def maybe_cleanup_default(net_name, did_init, create, attach, raise e LOG.debug(msg, nic_name, profile, fail_assume_enoent) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index 41ea4fc9..1b0158ec 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -56,18 +56,21 @@ import io from configobj import ConfigObj from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" -SERVER_CFG = '/etc/mcollective/server.cfg' +SERVER_CFG = "/etc/mcollective/server.cfg" LOG = logging.getLogger(__name__) -def configure(config, server_cfg=SERVER_CFG, - pubcert_file=PUBCERT_FILE, pricert_file=PRICERT_FILE): +def configure( + config, + server_cfg=SERVER_CFG, + pubcert_file=PUBCERT_FILE, + pricert_file=PRICERT_FILE, +): # Read server.cfg (if it exists) values from the # original file in order to be able to mix the rest up. 
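One detail worth calling out from the cc_lxd hunk above: `_lxc` pins LC_ALL, HOME and USER before shelling out, so `lxc` behaves predictably in the early-boot environment. The same pattern with only the standard library (the commented-out invocation assumes an lxd install):

    import os
    import subprocess

    def lxc(cmd):
        # Run an lxc subcommand with a minimal, predictable environment.
        env = {
            "LC_ALL": "C",
            "HOME": os.environ.get("HOME", "/root"),
            "USER": os.environ.get("USER", "root"),
        }
        subprocess.run(["lxc"] + list(cmd) + ["--force-local"],
                       env=env, check=True)

    # lxc(["network", "list"])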
try: @@ -77,20 +80,20 @@ def configure(config, server_cfg=SERVER_CFG, if e.errno != errno.ENOENT: raise else: - LOG.debug("Did not find file %s (starting with an empty" - " config)", server_cfg) + LOG.debug( + "Did not find file %s (starting with an empty config)", + server_cfg, + ) mcollective_config = ConfigObj() for (cfg_name, cfg) in config.items(): - if cfg_name == 'public-cert': + if cfg_name == "public-cert": util.write_file(pubcert_file, cfg, mode=0o644) - mcollective_config[ - 'plugin.ssl_server_public'] = pubcert_file - mcollective_config['securityprovider'] = 'ssl' - elif cfg_name == 'private-cert': + mcollective_config["plugin.ssl_server_public"] = pubcert_file + mcollective_config["securityprovider"] = "ssl" + elif cfg_name == "private-cert": util.write_file(pricert_file, cfg, mode=0o600) - mcollective_config[ - 'plugin.ssl_server_private'] = pricert_file - mcollective_config['securityprovider'] = 'ssl' + mcollective_config["plugin.ssl_server_private"] = pricert_file + mcollective_config["securityprovider"] = "ssl" else: if isinstance(cfg, str): # Just set it in the 'main' section @@ -126,21 +129,24 @@ def configure(config, server_cfg=SERVER_CFG, def handle(name, cfg, cloud, log, _args): # If there isn't a mcollective key in the configuration don't do anything - if 'mcollective' not in cfg: - log.debug(("Skipping module named %s, " - "no 'mcollective' key in configuration"), name) + if "mcollective" not in cfg: + log.debug( + "Skipping module named %s, no 'mcollective' key in configuration", + name, + ) return - mcollective_cfg = cfg['mcollective'] + mcollective_cfg = cfg["mcollective"] # Start by installing the mcollective package ... cloud.distro.install_packages(("mcollective",)) # ... and then update the mcollective configuration - if 'conf' in mcollective_cfg: - configure(config=mcollective_cfg['conf']) + if "conf" in mcollective_cfg: + configure(config=mcollective_cfg["conf"]) # restart mcollective to handle updated config - subp.subp(['service', 'mcollective', 'restart'], capture=False) + subp.subp(["service", "mcollective", "restart"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 79bcc27d..4fafb4af 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -29,16 +29,14 @@ false`` in config. 
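Back on the cc_mcollective hunk above: `configure` maps the special 'public-cert'/'private-cert' keys to files with distinct modes (0644 vs 0600) and flips `securityprovider` to ssl. A condensed sketch of just those branches, with hypothetical /tmp paths so it runs unprivileged:

    import os
    from configobj import ConfigObj

    def apply_certs(config, pubcert_file, pricert_file):
        mc = ConfigObj()
        for name, value in config.items():
            if name == "public-cert":
                # Public cert may be world-readable.
                with open(pubcert_file, "w") as f:
                    f.write(value)
                os.chmod(pubcert_file, 0o644)
                mc["plugin.ssl_server_public"] = pubcert_file
                mc["securityprovider"] = "ssl"
            elif name == "private-cert":
                # Private cert must be owner-only.
                with open(pricert_file, "w") as f:
                    f.write(value)
                os.chmod(pricert_file, 0o600)
                mc["plugin.ssl_server_private"] = pricert_file
                mc["securityprovider"] = "ssl"
        return mc

    mc = apply_certs({"public-cert": "PEM..."}, "/tmp/pub.pem", "/tmp/priv.pem")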
import os
import shutil

-from cloudinit import helpers
-from cloudinit import util
-
+from cloudinit import helpers, util
 from cloudinit.settings import PER_ALWAYS

 frequency = PER_ALWAYS


 def _migrate_canon_sems(cloud):
-    paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+    paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
     am_adjusted = 0
     for sem_path in paths:
         if not sem_path or not os.path.exists(sem_path):
@@ -57,12 +55,12 @@ def _migrate_canon_sems(cloud):

 def _migrate_legacy_sems(cloud, log):
     legacy_adjust = {
-        'apt-update-upgrade': [
-            'apt-configure',
-            'package-update-upgrade-install',
+        "apt-update-upgrade": [
+            "apt-configure",
+            "package-update-upgrade-install",
         ],
     }
-    paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem'))
+    paths = (cloud.paths.get_ipath("sem"), cloud.paths.get_cpath("sem"))
     for sem_path in paths:
         if not sem_path or not os.path.exists(sem_path):
             continue
@@ -78,8 +76,9 @@ def _migrate_legacy_sems(cloud, log):
                 util.del_file(os.path.join(sem_path, p))
                 (_name, freq) = os.path.splitext(p)
                 for m in migrate_to:
-                    log.debug("Migrating %s => %s with the same frequency",
-                              p, m)
+                    log.debug(
+                        "Migrating %s => %s with the same frequency", p, m
+                    )
                     with sem_helper.lock(m, freq):
                         pass

@@ -90,8 +89,10 @@ def handle(name, cfg, cloud, log, _args):
         log.debug("Skipping module named %s, migration disabled", name)
         return
     sems_moved = _migrate_canon_sems(cloud)
-    log.debug("Migrated %s semaphore files to there canonicalized names",
-              sems_moved)
+    log.debug(
+        "Migrated %s semaphore files to their canonicalized names", sems_moved
+    )
     _migrate_legacy_sems(cloud, log)

+
 # vi: ts=4 expandtab
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index eeb008d2..ec2e46ff 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -62,15 +62,12 @@ swap file is created.
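Before the cc_mounts hunks continue, a quick note on cc_migrator above: it walks the instance and cloud semaphore directories and renames any file whose canonicalized name differs. A toy sketch of that rename loop, assuming canonicalization simply turns dashes into underscores (the real rule lives in cloudinit.helpers and may differ):

    import os

    def canon(name):
        # Assumed canonical form: dashes normalised to underscores.
        return name.replace("-", "_")

    def migrate_canon_sems(sem_path):
        # Rename every semaphore file to its canonical name; count renames.
        adjusted = 0
        for p in os.listdir(sem_path):
            canonical = canon(p)
            if canonical != p:
                os.rename(os.path.join(sem_path, p),
                          os.path.join(sem_path, canonical))
                adjusted += 1
        return adjusted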
maxsize: """ -from string import whitespace - import logging import os import re +from string import whitespace -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, type_utils, util # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0 DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" @@ -105,21 +102,25 @@ def is_network_device(name): def _get_nth_partition_for_device(device_path, partition_number): - potential_suffixes = [str(partition_number), 'p%s' % (partition_number,), - '-part%s' % (partition_number,)] + potential_suffixes = [ + str(partition_number), + "p%s" % (partition_number,), + "-part%s" % (partition_number,), + ] for suffix in potential_suffixes: - potential_partition_device = '%s%s' % (device_path, suffix) + potential_partition_device = "%s%s" % (device_path, suffix) if os.path.exists(potential_partition_device): return potential_partition_device return None def _is_block_device(device_path, partition_path=None): - device_name = os.path.realpath(device_path).split('/')[-1] - sys_path = os.path.join('/sys/block/', device_name) + device_name = os.path.realpath(device_path).split("/")[-1] + sys_path = os.path.join("/sys/block/", device_name) if partition_path is not None: sys_path = os.path.join( - sys_path, os.path.realpath(partition_path).split('/')[-1]) + sys_path, os.path.realpath(partition_path).split("/")[-1] + ) return os.path.exists(sys_path) @@ -159,8 +160,9 @@ def sanitize_devname(startname, transformer, log, aliases=None): if partition_number is None: partition_path = _get_nth_partition_for_device(device_path, 1) else: - partition_path = _get_nth_partition_for_device(device_path, - partition_number) + partition_path = _get_nth_partition_for_device( + device_path, partition_number + ) if partition_path is None: return None @@ -174,12 +176,12 @@ def sanitize_devname(startname, transformer, log, aliases=None): def suggested_swapsize(memsize=None, maxsize=None, fsys=None): # make a suggestion on the size of swap for this system. 
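Referring back to the `_get_nth_partition_for_device` helper above: it probes the three common partition-naming conventions (sda1, nvme0n1p1, and by-id ...-part1) in order. A standalone sketch:

    import os

    def nth_partition(device_path, partition_number):
        # Try the plain, 'p<N>' and '-part<N>' suffix conventions in turn.
        for suffix in (str(partition_number),
                       "p%s" % partition_number,
                       "-part%s" % partition_number):
            candidate = "%s%s" % (device_path, suffix)
            if os.path.exists(candidate):
                return candidate
        return None

    print(nth_partition("/dev/sda", 1))  # path if /dev/sda1 exists, else None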
if memsize is None: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] GB = 2 ** 30 sugg_max = 8 * GB - info = {'avail': 'na', 'max_in': maxsize, 'mem': memsize} + info = {"avail": "na", "max_in": maxsize, "mem": memsize} if fsys is None and maxsize is None: # set max to 8GB default if no filesystem given @@ -187,18 +189,18 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): elif fsys: statvfs = os.statvfs(fsys) avail = statvfs.f_frsize * statvfs.f_bfree - info['avail'] = avail + info["avail"] = avail if maxsize is None: # set to 25% of filesystem space maxsize = min(int(avail / 4), sugg_max) - elif maxsize > ((avail * .9)): + elif maxsize > ((avail * 0.9)): # set to 90% of available disk space - maxsize = int(avail * .9) + maxsize = int(avail * 0.9) elif maxsize is None: maxsize = sugg_max - info['max'] = maxsize + info["max"] = maxsize formulas = [ # < 1G: swap = double memory @@ -226,7 +228,7 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): if size is not None: size = maxsize - info['size'] = size + info["size"] = size MB = 2 ** 20 pinfo = {} @@ -236,9 +238,14 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): else: pinfo[k] = v - LOG.debug("suggest %s swap for %s memory with '%s'" - " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'], - pinfo['avail'], pinfo['max_in'], pinfo['max']) + LOG.debug( + "suggest %s swap for %s memory with '%s' disk given max=%s [max=%s]'", + pinfo["size"], + pinfo["mem"], + pinfo["avail"], + pinfo["max_in"], + pinfo["max"], + ) return size @@ -248,14 +255,23 @@ def create_swapfile(fname: str, size: str) -> None: errmsg = "Failed to create swapfile '%s' of size %sMB via %s: %s" def create_swap(fname, size, method): - LOG.debug("Creating swapfile in '%s' on fstype '%s' using '%s'", - fname, fstype, method) + LOG.debug( + "Creating swapfile in '%s' on fstype '%s' using '%s'", + fname, + fstype, + method, + ) if method == "fallocate": - cmd = ['fallocate', '-l', '%sM' % size, fname] + cmd = ["fallocate", "-l", "%sM" % size, fname] elif method == "dd": - cmd = ['dd', 'if=/dev/zero', 'of=%s' % fname, 'bs=1M', - 'count=%s' % size] + cmd = [ + "dd", + "if=/dev/zero", + "of=%s" % fname, + "bs=1M", + "count=%s" % size, + ] try: subp.subp(cmd, capture=True) @@ -269,8 +285,9 @@ def create_swapfile(fname: str, size: str) -> None: fstype = util.get_mount_info(swap_dir)[1] - if (fstype == "xfs" and - util.kernel_version() < (4, 18)) or fstype == "btrfs": + if ( + fstype == "xfs" and util.kernel_version() < (4, 18) + ) or fstype == "btrfs": create_swap(fname, size, "dd") else: try: @@ -282,7 +299,7 @@ def create_swapfile(fname: str, size: str) -> None: if os.path.exists(fname): util.chmod(fname, 0o600) try: - subp.subp(['mkswap', fname]) + subp.subp(["mkswap", fname]) except subp.ProcessExecutionError: util.del_file(fname) raise @@ -297,37 +314,42 @@ def setup_swapfile(fname, size=None, maxsize=None): swap_dir = os.path.dirname(fname) if str(size).lower() == "auto": try: - memsize = util.read_meminfo()['total'] + memsize = util.read_meminfo()["total"] except IOError: LOG.debug("Not creating swap: failed to read meminfo") return util.ensure_dir(swap_dir) - size = suggested_swapsize(fsys=swap_dir, maxsize=maxsize, - memsize=memsize) + size = suggested_swapsize( + fsys=swap_dir, maxsize=maxsize, memsize=memsize + ) mibsize = str(int(size / (2 ** 20))) if not size: LOG.debug("Not creating swap: suggested size was 0") return - util.log_time(LOG.debug, msg="Setting up swap file", 
func=create_swapfile, - args=[fname, mibsize]) + util.log_time( + LOG.debug, + msg="Setting up swap file", + func=create_swapfile, + args=[fname, mibsize], + ) return fname def handle_swapcfg(swapcfg): """handle the swap config, calling setup_swap if necessary. - return None or (filename, size) + return None or (filename, size) """ if not isinstance(swapcfg, dict): LOG.warning("input for swap config was not a dict.") return None - fname = swapcfg.get('filename', '/swap.img') - size = swapcfg.get('size', 0) - maxsize = swapcfg.get('maxsize', None) + fname = swapcfg.get("filename", "/swap.img") + size = swapcfg.get("size", 0) + maxsize = swapcfg.get("maxsize", None) if not (size and fname): LOG.debug("no need to setup swap") @@ -335,8 +357,10 @@ def handle_swapcfg(swapcfg): if os.path.exists(fname): if not os.path.exists("/proc/swaps"): - LOG.debug("swap file %s exists, but no /proc/swaps exists, " - "being safe", fname) + LOG.debug( + "swap file %s exists, but no /proc/swaps exists, being safe", + fname, + ) return fname try: for line in util.load_file("/proc/swaps").splitlines(): @@ -345,8 +369,9 @@ def handle_swapcfg(swapcfg): return fname LOG.debug("swap file %s exists, but not in /proc/swaps", fname) except Exception: - LOG.warning("swap file %s exists. Error reading /proc/swaps", - fname) + LOG.warning( + "swap file %s exists. Error reading /proc/swaps", fname + ) return fname try: @@ -373,8 +398,10 @@ def handle(_name, cfg, cloud, log, _args): defvals = cfg.get("mount_default_fields", defvals) # these are our default set of mounts - defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], - ["swap", "none", "swap", "sw", "0", "0"]] + defmnts = [ + ["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], + ["swap", "none", "swap", "sw", "0", "0"], + ] cfgmnt = [] if "mounts" in cfg: @@ -404,13 +431,17 @@ def handle(_name, cfg, cloud, log, _args): for i in range(len(cfgmnt)): # skip something that wasn't a list if not isinstance(cfgmnt[i], list): - log.warning("Mount option %s not a list, got a %s instead", - (i + 1), type_utils.obj_name(cfgmnt[i])) + log.warning( + "Mount option %s not a list, got a %s instead", + (i + 1), + type_utils.obj_name(cfgmnt[i]), + ) continue start = str(cfgmnt[i][0]) - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed %s => %s" % (start, sanitized)) @@ -418,8 +449,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent named mount %s", start) continue elif sanitized in fstab_devs: - log.info("Device %s already defined in fstab: %s", - sanitized, fstab_devs[sanitized]) + log.info( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue cfgmnt[i][0] = sanitized @@ -452,8 +486,9 @@ def handle(_name, cfg, cloud, log, _args): # entry has the same device name for defmnt in defmnts: start = defmnt[0] - sanitized = sanitize_devname(start, cloud.device_name_to_device, log, - aliases=device_aliases) + sanitized = sanitize_devname( + start, cloud.device_name_to_device, log, aliases=device_aliases + ) if sanitized != start: log.debug("changed default device %s => %s" % (start, sanitized)) @@ -461,8 +496,11 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Ignoring nonexistent default named mount %s", start) continue elif sanitized in fstab_devs: - log.debug("Device %s already defined in fstab: %s", - 
sanitized, fstab_devs[sanitized]) + log.debug( + "Device %s already defined in fstab: %s", + sanitized, + fstab_devs[sanitized], + ) continue defmnt[0] = sanitized @@ -474,8 +512,7 @@ def handle(_name, cfg, cloud, log, _args): break if cfgmnt_has: - log.debug(("Not including %s, already" - " previously included"), start) + log.debug("Not including %s, already previously included", start) continue cfgmnt.append(defmnt) @@ -488,7 +525,7 @@ def handle(_name, cfg, cloud, log, _args): else: actlist.append(x) - swapret = handle_swapcfg(cfg.get('swap', {})) + swapret = handle_swapcfg(cfg.get("swap", {})) if swapret: actlist.append([swapret, "none", "swap", "sw", "0", "0"]) @@ -507,10 +544,11 @@ def handle(_name, cfg, cloud, log, _args): needswap = True if line[1].startswith("/"): dirs.append(line[1]) - cc_lines.append('\t'.join(line)) + cc_lines.append("\t".join(line)) - mount_points = [v['mountpoint'] for k, v in util.mounts().items() - if 'mountpoint' in v] + mount_points = [ + v["mountpoint"] for k, v in util.mounts().items() if "mountpoint" in v + ] for d in dirs: try: util.ensure_dir(d) @@ -525,11 +563,12 @@ def handle(_name, cfg, cloud, log, _args): sadds = [WS.sub(" ", n) for n in cc_lines] sdrops = [WS.sub(" ", n) for n in fstab_removed] - sops = (["- " + drop for drop in sdrops if drop not in sadds] + - ["+ " + add for add in sadds if add not in sdrops]) + sops = ["- " + drop for drop in sdrops if drop not in sadds] + [ + "+ " + add for add in sadds if add not in sdrops + ] fstab_lines.extend(cc_lines) - contents = "%s\n" % ('\n'.join(fstab_lines)) + contents = "%s\n" % "\n".join(fstab_lines) util.write_file(FSTAB_PATH, contents) activate_cmds = [] @@ -549,7 +588,7 @@ def handle(_name, cfg, cloud, log, _args): fmt = "Activating swap and mounts with: %s" for cmd in activate_cmds: - fmt = "Activate mounts: %s:" + ' '.join(cmd) + fmt = "Activate mounts: %s:" + " ".join(cmd) try: subp.subp(cmd) log.debug(fmt, "PASS") @@ -557,4 +596,5 @@ def handle(_name, cfg, cloud, log, _args): log.warning(fmt, "FAIL") util.logexc(log, fmt, "FAIL") + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index c55d5d86..a31da9bb 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -11,124 +11,132 @@ import os from textwrap import dedent from cloudinit import log as logging -from cloudinit import temp_utils -from cloudinit import templater -from cloudinit import type_utils -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, temp_utils, templater, type_utils, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) frequency = PER_INSTANCE -NTP_CONF = '/etc/ntp.conf' +NTP_CONF = "/etc/ntp.conf" NR_POOL_SERVERS = 4 -distros = ['almalinux', 'alpine', 'centos', 'cloudlinux', 'debian', - 'eurolinux', 'fedora', 'miraclelinux', 'openEuler', 'opensuse', - 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] +distros = [ + "almalinux", + "alpine", + "centos", + "cloudlinux", + "debian", + "eurolinux", + "fedora", + "miraclelinux", + "openEuler", + "opensuse", + "photon", + "rhel", + "rocky", + "sles", + "ubuntu", + "virtuozzo", +] NTP_CLIENT_CONFIG = { - 'chrony': { - 'check_exe': 'chronyd', - 'confpath': '/etc/chrony.conf', - 'packages': ['chrony'], - 'service_name': 'chrony', - 'template_name': 'chrony.conf.{distro}', - 'template': None, + "chrony": { + "check_exe": "chronyd", + "confpath": "/etc/chrony.conf", + 
"packages": ["chrony"], + "service_name": "chrony", + "template_name": "chrony.conf.{distro}", + "template": None, }, - 'ntp': { - 'check_exe': 'ntpd', - 'confpath': NTP_CONF, - 'packages': ['ntp'], - 'service_name': 'ntp', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntp": { + "check_exe": "ntpd", + "confpath": NTP_CONF, + "packages": ["ntp"], + "service_name": "ntp", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'ntpdate': { - 'check_exe': 'ntpdate', - 'confpath': NTP_CONF, - 'packages': ['ntpdate'], - 'service_name': 'ntpdate', - 'template_name': 'ntp.conf.{distro}', - 'template': None, + "ntpdate": { + "check_exe": "ntpdate", + "confpath": NTP_CONF, + "packages": ["ntpdate"], + "service_name": "ntpdate", + "template_name": "ntp.conf.{distro}", + "template": None, }, - 'systemd-timesyncd': { - 'check_exe': '/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf', - 'packages': [], - 'service_name': 'systemd-timesyncd', - 'template_name': 'timesyncd.conf', - 'template': None, + "systemd-timesyncd": { + "check_exe": "/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf.d/cloud-init.conf", + "packages": [], + "service_name": "systemd-timesyncd", + "template_name": "timesyncd.conf", + "template": None, }, } # This is Distro-specific configuration overrides of the base config DISTRO_CLIENT_CONFIG = { - 'alpine': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', - 'service_name': 'chronyd', + "alpine": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'packages': [], - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "packages": [], + "service_name": "ntpd", }, }, - 'debian': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "debian": { + "chrony": { + "confpath": "/etc/chrony/chrony.conf", }, }, - 'opensuse': { - 'chrony': { - 'service_name': 'chronyd', + "opensuse": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'photon': { - 'chrony': { - 'service_name': 'chronyd', + "photon": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'service_name': 'ntpd', - 'confpath': '/etc/ntp.conf' - }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', - 'confpath': '/etc/systemd/timesyncd.conf', + "ntp": {"service_name": "ntpd", "confpath": "/etc/ntp.conf"}, + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", + "confpath": "/etc/systemd/timesyncd.conf", }, }, - 'rhel': { - 'ntp': { - 'service_name': 'ntpd', + "rhel": { + "ntp": { + "service_name": "ntpd", }, - 'chrony': { - 'service_name': 'chronyd', + "chrony": { + "service_name": "chronyd", }, }, - 'sles': { - 'chrony': { - 'service_name': 'chronyd', + "sles": { + "chrony": { + "service_name": "chronyd", }, - 'ntp': { - 'confpath': '/etc/ntp.conf', - 'service_name': 'ntpd', + "ntp": { + "confpath": "/etc/ntp.conf", + "service_name": "ntpd", }, - 'systemd-timesyncd': { - 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + "systemd-timesyncd": { + "check_exe": "/usr/lib/systemd/systemd-timesyncd", }, }, - 'ubuntu': { - 'chrony': { - 'confpath': '/etc/chrony/chrony.conf', + "ubuntu": 
{ + "chrony": { + "confpath": "/etc/chrony/chrony.conf", }, }, } @@ -141,10 +149,11 @@ DISTRO_CLIENT_CONFIG = { # configuration. meta = { - 'id': 'cc_ntp', - 'name': 'NTP', - 'title': 'enable and configure ntp', - 'description': dedent("""\ + "id": "cc_ntp", + "name": "NTP", + "title": "enable and configure ntp", + "description": dedent( + """\ Handle ntp configuration. If ntp is not installed on the system and ntp configuration is specified, ntp will be installed. If there is a default ntp config file in the image or one is present in the @@ -152,16 +161,20 @@ meta = { appended to the filename before any changes are made. A list of ntp pools and ntp servers can be provided under the ``ntp`` config key. If no ntp ``servers`` or ``pools`` are provided, 4 pools will be used - in the format ``{0-3}.{distro}.pool.ntp.org``."""), - 'distros': distros, - 'examples': [ - dedent("""\ + in the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), + "distros": distros, + "examples": [ + dedent( + """\ # Override ntp with chrony configuration on Ubuntu ntp: enabled: true ntp_client: chrony # Uses cloud-init default chrony configuration - """), - dedent("""\ + """ + ), + dedent( + """\ # Provide a custom ntp client configuration ntp: enabled: true @@ -188,120 +201,137 @@ meta = { servers: - ntp.server.local - ntp.ubuntu.com - - 192.168.23.2""")], - 'frequency': PER_INSTANCE, + - 192.168.23.2""" + ), + ], + "frequency": PER_INSTANCE, } schema = { - 'type': 'object', - 'properties': { - 'ntp': { - 'type': ['object', 'null'], - 'properties': { - 'pools': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "type": "object", + "properties": { + "ntp": { + "type": ["object", "null"], + "properties": { + "pools": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp pools. If both pools and servers are empty, 4 default pool servers will be provided of the format ``{0-3}.{distro}.pool.ntp.org``. NOTE: for Alpine Linux when using the Busybox NTP client this setting will be ignored due to the limited - functionality of Busybox's ntpd.""") + functionality of Busybox's ntpd.""" + ), }, - 'servers': { - 'type': 'array', - 'items': { - 'type': 'string', - 'format': 'hostname' - }, - 'uniqueItems': True, - 'description': dedent("""\ + "servers": { + "type": "array", + "items": {"type": "string", "format": "hostname"}, + "uniqueItems": True, + "description": dedent( + """\ List of ntp servers. If both pools and servers are empty, 4 default pool servers will be provided with - the format ``{0-3}.{distro}.pool.ntp.org``.""") + the format ``{0-3}.{distro}.pool.ntp.org``.""" + ), }, - 'ntp_client': { - 'type': 'string', - 'default': 'auto', - 'description': dedent("""\ + "ntp_client": { + "type": "string", + "default": "auto", + "description": dedent( + """\ Name of an NTP client to use to configure system NTP. When unprovided or 'auto' the default client preferred by the distribution will be used. The following built-in client names can be used to override existing configuration defaults: chrony, ntp, ntpdate, - systemd-timesyncd."""), + systemd-timesyncd.""" + ), }, - 'enabled': { - 'type': 'boolean', - 'default': True, - 'description': dedent("""\ + "enabled": { + "type": "boolean", + "default": True, + "description": dedent( + """\ Attempt to enable ntp clients if set to True. 
If set to False, ntp client will not be configured or - installed"""), + installed""" + ), }, - 'config': { - 'description': dedent("""\ + "config": { + "description": dedent( + """\ Configuration settings or overrides for the - ``ntp_client`` specified."""), - 'type': ['object'], - 'properties': { - 'confpath': { - 'type': 'string', - 'description': dedent("""\ + ``ntp_client`` specified.""" + ), + "type": ["object"], + "properties": { + "confpath": { + "type": "string", + "description": dedent( + """\ The path to where the ``ntp_client`` - configuration is written."""), + configuration is written.""" + ), }, - 'check_exe': { - 'type': 'string', - 'description': dedent("""\ + "check_exe": { + "type": "string", + "description": dedent( + """\ The executable name for the ``ntp_client``. For example, ntp service ``check_exe`` is - 'ntpd' because it runs the ntpd binary."""), + 'ntpd' because it runs the ntpd binary.""" + ), }, - 'packages': { - 'type': 'array', - 'items': { - 'type': 'string', + "packages": { + "type": "array", + "items": { + "type": "string", }, - 'uniqueItems': True, - 'description': dedent("""\ + "uniqueItems": True, + "description": dedent( + """\ List of packages needed to be installed for the - selected ``ntp_client``."""), + selected ``ntp_client``.""" + ), }, - 'service_name': { - 'type': 'string', - 'description': dedent("""\ + "service_name": { + "type": "string", + "description": dedent( + """\ The systemd or sysvinit service name used to start and stop the ``ntp_client`` - service."""), + service.""" + ), }, - 'template': { - 'type': 'string', - 'description': dedent("""\ + "template": { + "type": "string", + "description": dedent( + """\ Inline template allowing users to define their own ``ntp_client`` configuration template. The value must start with '## template:jinja' to enable use of templating support. - """), + """ + ), }, }, # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override # of builtin client values. 
- 'minProperties': 1, # If we have config, define something - 'additionalProperties': False + "minProperties": 1, # If we have config, define something + "additionalProperties": False, }, }, - 'additionalProperties': False + "additionalProperties": False, } - } + }, } -REQUIRED_NTP_CONFIG_KEYS = frozenset([ - 'check_exe', 'confpath', 'packages', 'service_name']) +REQUIRED_NTP_CONFIG_KEYS = frozenset( + ["check_exe", "confpath", "packages", "service_name"] +) __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -334,21 +364,23 @@ def select_ntp_client(ntp_client, distro): distro_cfg = distro_ntp_client_configs(distro.name) # user specified client, return its config - if ntp_client and ntp_client != 'auto': - LOG.debug('Selected NTP client "%s" via user-data configuration', - ntp_client) + if ntp_client and ntp_client != "auto": + LOG.debug( + 'Selected NTP client "%s" via user-data configuration', ntp_client + ) return distro_cfg.get(ntp_client, {}) # default to auto if unset in distro - distro_ntp_client = distro.get_option('ntp_client', 'auto') + distro_ntp_client = distro.get_option("ntp_client", "auto") clientcfg = {} if distro_ntp_client == "auto": for client in distro.preferred_ntp_clients: cfg = distro_cfg.get(client) - if subp.which(cfg.get('check_exe')): - LOG.debug('Selected NTP client "%s", already installed', - client) + if subp.which(cfg.get("check_exe")): + LOG.debug( + 'Selected NTP client "%s", already installed', client + ) clientcfg = cfg break @@ -356,11 +388,14 @@ def select_ntp_client(ntp_client, distro): client = distro.preferred_ntp_clients[0] LOG.debug( 'Selected distro preferred NTP client "%s", not yet installed', - client) + client, + ) clientcfg = distro_cfg.get(client) else: - LOG.debug('Selected NTP client "%s" via distro system config', - distro_ntp_client) + LOG.debug( + 'Selected NTP client "%s" via distro system config', + distro_ntp_client, + ) clientcfg = distro_cfg.get(distro_ntp_client, {}) return clientcfg @@ -378,7 +413,7 @@ def install_ntp_client(install_func, packages=None, check_exe="ntpd"): if subp.which(check_exe): return if packages is None: - packages = ['ntp'] + packages = ["ntp"] install_func(packages) @@ -403,25 +438,34 @@ def generate_server_names(distro): names = [] pool_distro = distro - if distro == 'sles': + if distro == "sles": # For legal reasons x.pool.sles.ntp.org does not exist, # use the opensuse pool - pool_distro = 'opensuse' - elif distro == 'alpine' or distro == 'eurolinux': + pool_distro = "opensuse" + elif distro == "alpine" or distro == "eurolinux": # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist # so use general x.pool.ntp.org instead. The same applies to EuroLinux - pool_distro = '' + pool_distro = "" for x in range(0, NR_POOL_SERVERS): - names.append(".".join( - [n for n in [str(x)] + [pool_distro] + ['pool.ntp.org'] if n])) + names.append( + ".".join( + [n for n in [str(x)] + [pool_distro] + ["pool.ntp.org"] if n] + ) + ) return names -def write_ntp_config_template(distro_name, service_name=None, servers=None, - pools=None, path=None, template_fn=None, - template=None): +def write_ntp_config_template( + distro_name, + service_name=None, + servers=None, + pools=None, + path=None, + template_fn=None, + template=None, +): """Render a ntp client configuration for the specified client. @param distro_name: string. The distro class name. 
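For the cc_ntp hunks above: `generate_server_names` assembles `{0-3}.{distro}.pool.ntp.org`, mapping sles to the opensuse pool and dropping the distro component entirely for alpine and eurolinux. A compact sketch of that assembly:

    NR_POOL_SERVERS = 4

    def generate_server_names(distro):
        # sles has no pool of its own; alpine/eurolinux use the generic pool.
        pool_distro = {"sles": "opensuse",
                       "alpine": "",
                       "eurolinux": ""}.get(distro, distro)
        return [
            ".".join(n for n in (str(x), pool_distro, "pool.ntp.org") if n)
            for x in range(NR_POOL_SERVERS)
        ]

    print(generate_server_names("ubuntu"))  # ['0.ubuntu.pool.ntp.org', ...]
    print(generate_server_names("alpine"))  # ['0.pool.ntp.org', ...]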
@@ -444,27 +488,30 @@ def write_ntp_config_template(distro_name, service_name=None, servers=None, if not pools: pools = [] - if (len(servers) == 0 and distro_name == 'alpine' and - service_name == 'ntpd'): + if ( + len(servers) == 0 + and distro_name == "alpine" + and service_name == "ntpd" + ): # Alpine's Busybox ntpd only understands "servers" configuration # and not "pool" configuration. servers = generate_server_names(distro_name) - LOG.debug( - 'Adding distro default ntp servers: %s', ','.join(servers)) + LOG.debug("Adding distro default ntp servers: %s", ",".join(servers)) elif len(servers) == 0 and len(pools) == 0: pools = generate_server_names(distro_name) LOG.debug( - 'Adding distro default ntp pool servers: %s', ','.join(pools)) + "Adding distro default ntp pool servers: %s", ",".join(pools) + ) if not path: - raise ValueError('Invalid value for path parameter') + raise ValueError("Invalid value for path parameter") if not template_fn and not template: - raise ValueError('Not template_fn or template provided') + raise ValueError("Not template_fn or template provided") - params = {'servers': servers, 'pools': pools} + params = {"servers": servers, "pools": pools} if template: - tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + tfile = temp_utils.mkstemp(prefix="template_name-", suffix=".tmpl") template_fn = tfile[1] # filepath is second item in tuple util.write_file(template_fn, content=template) @@ -487,50 +534,62 @@ def supplemental_schema_validation(ntp_config): errors = [] missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys())) if missing: - keys = ', '.join(sorted(missing)) + keys = ", ".join(sorted(missing)) errors.append( - 'Missing required ntp:config keys: {keys}'.format(keys=keys)) - elif not any([ntp_config.get('template'), - ntp_config.get('template_name')]): + "Missing required ntp:config keys: {keys}".format(keys=keys) + ) + elif not any( + [ntp_config.get("template"), ntp_config.get("template_name")] + ): errors.append( - 'Either ntp:config:template or ntp:config:template_name values' - ' are required') + "Either ntp:config:template or ntp:config:template_name values" + " are required" + ) for key, value in sorted(ntp_config.items()): - keypath = 'ntp:config:' + key - if key == 'confpath': + keypath = "ntp:config:" + key + if key == "confpath": if not all([value, isinstance(value, str)]): errors.append( - 'Expected a config file path {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key == 'packages': + "Expected a config file path {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key == "packages": if not isinstance(value, list): errors.append( - 'Expected a list of required package names for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) - elif key in ('template', 'template_name'): + "Expected a list of required package names for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) + elif key in ("template", "template_name"): if value is None: # Either template or template_name can be none continue if not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' - ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}." + " Found ({value})".format(keypath=keypath, value=value) + ) elif not isinstance(value, str): errors.append( - 'Expected a string type for {keypath}.' 
- ' Found ({value})'.format(keypath=keypath, value=value)) + "Expected a string type for {keypath}. Found ({value})".format( + keypath=keypath, value=value + ) + ) if errors: - raise ValueError(r'Invalid ntp configuration:\n{errors}'.format( - errors='\n'.join(errors))) + raise ValueError( + r"Invalid ntp configuration:\n{errors}".format( + errors="\n".join(errors) + ) + ) def handle(name, cfg, cloud, log, _args): """Enable and configure ntp.""" - if 'ntp' not in cfg: + if "ntp" not in cfg: LOG.debug( - "Skipping module named %s, not present or disabled by cfg", name) + "Skipping module named %s, not present or disabled by cfg", name + ) return - ntp_cfg = cfg['ntp'] + ntp_cfg = cfg["ntp"] if ntp_cfg is None: ntp_cfg = {} # Allow empty config which will install the package @@ -538,52 +597,61 @@ def handle(name, cfg, cloud, log, _args): if not isinstance(ntp_cfg, (dict)): raise RuntimeError( "'ntp' key existed in config, but not a dictionary type," - " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)) + ) validate_cloudconfig_schema(cfg, schema) # Allow users to explicitly enable/disable - enabled = ntp_cfg.get('enabled', True) + enabled = ntp_cfg.get("enabled", True) if util.is_false(enabled): LOG.debug("Skipping module named %s, disabled by cfg", name) return # Select which client is going to be used and get the configuration - ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), - cloud.distro) + ntp_client_config = select_ntp_client( + ntp_cfg.get("ntp_client"), cloud.distro + ) # Allow user ntp config to override distro configurations ntp_client_config = util.mergemanydict( - [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) + [ntp_client_config, ntp_cfg.get("config", {})], reverse=True + ) supplemental_schema_validation(ntp_client_config) - rename_ntp_conf(confpath=ntp_client_config.get('confpath')) + rename_ntp_conf(confpath=ntp_client_config.get("confpath")) template_fn = None - if not ntp_client_config.get('template'): - template_name = ( - ntp_client_config.get('template_name').replace('{distro}', - cloud.distro.name)) + if not ntp_client_config.get("template"): + template_name = ntp_client_config.get("template_name").replace( + "{distro}", cloud.distro.name + ) template_fn = cloud.get_template_filename(template_name) if not template_fn: - msg = ('No template found, not rendering %s' % - ntp_client_config.get('template_name')) + msg = ( + "No template found, not rendering %s" + % ntp_client_config.get("template_name") + ) raise RuntimeError(msg) - write_ntp_config_template(cloud.distro.name, - service_name=ntp_client_config.get( - 'service_name'), - servers=ntp_cfg.get('servers', []), - pools=ntp_cfg.get('pools', []), - path=ntp_client_config.get('confpath'), - template_fn=template_fn, - template=ntp_client_config.get('template')) - - install_ntp_client(cloud.distro.install_packages, - packages=ntp_client_config['packages'], - check_exe=ntp_client_config['check_exe']) + write_ntp_config_template( + cloud.distro.name, + service_name=ntp_client_config.get("service_name"), + servers=ntp_cfg.get("servers", []), + pools=ntp_cfg.get("pools", []), + path=ntp_client_config.get("confpath"), + template_fn=template_fn, + template=ntp_client_config.get("template"), + ) + + install_ntp_client( + cloud.distro.install_packages, + packages=ntp_client_config["packages"], + check_exe=ntp_client_config["check_exe"], + ) try: - cloud.distro.manage_service('reload', - 
ntp_client_config.get('service_name')) + cloud.distro.manage_service( + "reload", ntp_client_config.get("service_name") + ) except subp.ProcessExecutionError as e: LOG.exception("Failed to reload/start ntp service: %s", e) raise diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index 036baf85..14cdfab8 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -43,8 +43,7 @@ import os import time from cloudinit import log as logging -from cloudinit import subp -from cloudinit import util +from cloudinit import subp, util REBOOT_FILE = "/var/run/reboot-required" REBOOT_CMD = ["/sbin/reboot"] @@ -68,17 +67,19 @@ def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): log.debug("Rebooted, but still running after %s seconds", int(elapsed)) # If we got here, not good elapsed = time.time() - start - raise RuntimeError(("Reboot did not happen" - " after %s seconds!") % (int(elapsed))) + raise RuntimeError( + "Reboot did not happen after %s seconds!" % (int(elapsed)) + ) def handle(_name, cfg, cloud, log, _args): # Handle the old style + new config names - update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update') - upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade') - reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required', - 'package_reboot_if_required') - pkglist = util.get_cfg_option_list(cfg, 'packages', []) + update = _multi_cfg_bool_get(cfg, "apt_update", "package_update") + upgrade = _multi_cfg_bool_get(cfg, "package_upgrade", "apt_upgrade") + reboot_if_required = _multi_cfg_bool_get( + cfg, "apt_reboot_if_required", "package_reboot_if_required" + ) + pkglist = util.get_cfg_option_list(cfg, "packages", []) errors = [] if update or len(pkglist) or upgrade: @@ -109,8 +110,9 @@ def handle(_name, cfg, cloud, log, _args): reboot_fn_exists = os.path.isfile(REBOOT_FILE) if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: try: - log.warning("Rebooting after upgrade or install per " - "%s", REBOOT_FILE) + log.warning( + "Rebooting after upgrade or install per %s", REBOOT_FILE + ) # Flush the above warning + anything else out... logging.flushLoggers(log) _fire_reboot(log) @@ -119,8 +121,10 @@ def handle(_name, cfg, cloud, log, _args): errors.append(e) if len(errors): - log.warning("%s failed with exceptions, re-raising the last one", - len(errors)) + log.warning( + "%s failed with exceptions, re-raising the last one", len(errors) + ) raise errors[-1] + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index 733c3910..cc1fe53e 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -41,22 +41,19 @@ keys to post. 
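A note on `_fire_reboot` in the cc_package_update_upgrade_install hunk above: after issuing the reboot command it polls with an exponential backoff and raises if the process is somehow still alive at the end. A sketch of that wait loop, with a hypothetical `still_running()` probe standing in for the real check:

    import time

    def wait_for_reboot(still_running, wait_attempts=6,
                        initial_sleep=1, backoff=2):
        # Sleep in growing increments; give up once the attempts run out.
        start = time.time()
        sleep = initial_sleep
        for _ in range(wait_attempts):
            time.sleep(sleep)
            sleep *= backoff
            if not still_running():
                return
        raise RuntimeError(
            "Reboot did not happen after %s seconds!"
            % int(time.time() - start)
        )

    wait_for_reboot(lambda: False)  # returns after one sleep in this toy case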
Available keys are: tries: 10 """ -from cloudinit import templater -from cloudinit import url_helper -from cloudinit import util - +from cloudinit import templater, url_helper, util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE POST_LIST_ALL = [ - 'pub_key_dsa', - 'pub_key_rsa', - 'pub_key_ecdsa', - 'pub_key_ed25519', - 'instance_id', - 'hostname', - 'fqdn' + "pub_key_dsa", + "pub_key_rsa", + "pub_key_ecdsa", + "pub_key_ed25519", + "instance_id", + "hostname", + "fqdn", ] @@ -74,48 +71,58 @@ def handle(name, cfg, cloud, log, args): if len(args) != 0: ph_cfg = util.read_conf(args[0]) else: - if 'phone_home' not in cfg: - log.debug(("Skipping module named %s, " - "no 'phone_home' configuration found"), name) + if "phone_home" not in cfg: + log.debug( + "Skipping module named %s, " + "no 'phone_home' configuration found", + name, + ) return - ph_cfg = cfg['phone_home'] - - if 'url' not in ph_cfg: - log.warning(("Skipping module named %s, " - "no 'url' found in 'phone_home' configuration"), name) + ph_cfg = cfg["phone_home"] + + if "url" not in ph_cfg: + log.warning( + "Skipping module named %s, " + "no 'url' found in 'phone_home' configuration", + name, + ) return - url = ph_cfg['url'] - post_list = ph_cfg.get('post', 'all') - tries = ph_cfg.get('tries') + url = ph_cfg["url"] + post_list = ph_cfg.get("post", "all") + tries = ph_cfg.get("tries") try: tries = int(tries) except Exception: tries = 10 - util.logexc(log, "Configuration entry 'tries' is not an integer, " - "using %s instead", tries) + util.logexc( + log, + "Configuration entry 'tries' is not an integer, using %s instead", + tries, + ) if post_list == "all": post_list = POST_LIST_ALL all_keys = {} - all_keys['instance_id'] = cloud.get_instance_id() - all_keys['hostname'] = cloud.get_hostname() - all_keys['fqdn'] = cloud.get_hostname(fqdn=True) + all_keys["instance_id"] = cloud.get_instance_id() + all_keys["hostname"] = cloud.get_hostname() + all_keys["fqdn"] = cloud.get_hostname(fqdn=True) pubkeys = { - 'pub_key_dsa': '/etc/ssh/ssh_host_dsa_key.pub', - 'pub_key_rsa': '/etc/ssh/ssh_host_rsa_key.pub', - 'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub', - 'pub_key_ed25519': '/etc/ssh/ssh_host_ed25519_key.pub', + "pub_key_dsa": "/etc/ssh/ssh_host_dsa_key.pub", + "pub_key_rsa": "/etc/ssh/ssh_host_rsa_key.pub", + "pub_key_ecdsa": "/etc/ssh/ssh_host_ecdsa_key.pub", + "pub_key_ed25519": "/etc/ssh/ssh_host_ed25519_key.pub", } for (n, path) in pubkeys.items(): try: all_keys[n] = util.load_file(path) except Exception: - util.logexc(log, "%s: failed to open, can not phone home that " - "data!", path) + util.logexc( + log, "%s: failed to open, can not phone home that data!", path + ) submit_keys = {} for k in post_list: @@ -123,28 +130,37 @@ def handle(name, cfg, cloud, log, args): submit_keys[k] = all_keys[k] else: submit_keys[k] = None - log.warning(("Requested key %s from 'post'" - " configuration list not available"), k) + log.warning( + "Requested key %s from 'post'" + " configuration list not available", + k, + ) # Get them read to be posted real_submit_keys = {} for (k, v) in submit_keys.items(): if v is None: - real_submit_keys[k] = 'N/A' + real_submit_keys[k] = "N/A" else: real_submit_keys[k] = str(v) # Incase the url is parameterized url_params = { - 'INSTANCE_ID': all_keys['instance_id'], + "INSTANCE_ID": all_keys["instance_id"], } url = templater.render_string(url, url_params) try: url_helper.read_file_or_url( - url, data=real_submit_keys, retries=tries, sec_between=3, - 
ssl_details=util.fetch_ssl_details(cloud.paths)) + url, + data=real_submit_keys, + retries=tries, + sec_between=3, + ssl_details=util.fetch_ssl_details(cloud.paths), + ) except Exception: - util.logexc(log, "Failed to post phone home data to %s in %s tries", - url, tries) + util.logexc( + log, "Failed to post phone home data to %s in %s tries", url, tries + ) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 5780a7e9..d4eb68c0 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -58,9 +58,8 @@ import re import subprocess import time +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import subp -from cloudinit import util frequency = PER_INSTANCE @@ -75,9 +74,9 @@ def givecmdline(pid): # PID COMM ARGS # 1 init /bin/init -- if util.is_FreeBSD(): - (output, _err) = subp.subp(['procstat', '-c', str(pid)]) + (output, _err) = subp.subp(["procstat", "-c", str(pid)]) line = output.splitlines()[1] - m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) + m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line) return m.group(2) else: return util.load_file("/proc/%s/cmdline" % pid) @@ -106,8 +105,9 @@ def check_condition(cond, log=None): return False else: if log: - log.warning(pre + "unexpected exit %s. " % ret + - "do not apply change.") + log.warning( + pre + "unexpected exit %s. " % ret + "do not apply change." + ) return False except Exception as e: if log: @@ -138,16 +138,24 @@ def handle(_name, cfg, cloud, log, _args): devnull_fp = open(os.devnull, "w") - log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) + log.debug("After pid %s ends, will execute: %s" % (mypid, " ".join(args))) - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, - condition, execmd, [args, devnull_fp]) + util.fork_cb( + run_after_pid_gone, + mypid, + cmdline, + timeout, + log, + condition, + execmd, + [args, devnull_fp], + ) def load_power_state(cfg, distro): # returns a tuple of shutdown_command, timeout # shutdown_command is None if no config found - pstate = cfg.get('power_state') + pstate = cfg.get("power_state") if pstate is None: return (None, None, None) @@ -155,22 +163,25 @@ def load_power_state(cfg, distro): if not isinstance(pstate, dict): raise TypeError("power_state is not a dict.") - modes_ok = ['halt', 'poweroff', 'reboot'] + modes_ok = ["halt", "poweroff", "reboot"] mode = pstate.get("mode") if mode not in distro.shutdown_options_map: raise TypeError( - "power_state[mode] required, must be one of: %s. found: '%s'." % - (','.join(modes_ok), mode)) + "power_state[mode] required, must be one of: %s. found: '%s'." + % (",".join(modes_ok), mode) + ) - args = distro.shutdown_command(mode=mode, - delay=pstate.get("delay", "now"), - message=pstate.get("message")) + args = distro.shutdown_command( + mode=mode, + delay=pstate.get("delay", "now"), + message=pstate.get("message"), + ) try: - timeout = float(pstate.get('timeout', 30.0)) + timeout = float(pstate.get("timeout", 30.0)) except ValueError as e: raise ValueError( - "failed to convert timeout '%s' to float." % pstate['timeout'] + "failed to convert timeout '%s' to float." 
% pstate["timeout"] ) from e condition = pstate.get("condition", True) @@ -186,8 +197,12 @@ def doexit(sysexit): def execmd(exe_args, output=None, data_in=None): ret = 1 try: - proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, - stdout=output, stderr=subprocess.STDOUT) + proc = subprocess.Popen( + exe_args, + stdin=subprocess.PIPE, + stdout=output, + stderr=subprocess.STDOUT, + ) proc.communicate(data_in) ret = proc.returncode except Exception: @@ -230,7 +245,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): except Exception as e: fatal("Unexpected Exception: %s" % e) - time.sleep(.25) + time.sleep(0.25) if not msg: fatal("Unexpected error in run_after_pid_gone") @@ -246,4 +261,5 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): func(*args) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index dc20fc44..f51f49bc 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -108,23 +108,20 @@ key (by default the agent will execute with the ``--test`` flag). import os import socket -import yaml from io import StringIO -from cloudinit import helpers -from cloudinit import subp -from cloudinit import temp_utils -from cloudinit import util -from cloudinit import url_helper +import yaml -AIO_INSTALL_URL = 'https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh' # noqa: E501 -PUPPET_AGENT_DEFAULT_ARGS = ['--test'] +from cloudinit import helpers, subp, temp_utils, url_helper, util +AIO_INSTALL_URL = "https://raw.githubusercontent.com/puppetlabs/install-puppet/main/install.sh" # noqa: E501 +PUPPET_AGENT_DEFAULT_ARGS = ["--test"] -class PuppetConstants(object): - def __init__(self, puppet_conf_file, puppet_ssl_dir, - csr_attributes_path, log): +class PuppetConstants(object): + def __init__( + self, puppet_conf_file, puppet_ssl_dir, csr_attributes_path, log + ): self.conf_path = puppet_conf_file self.ssl_dir = puppet_ssl_dir self.ssl_cert_dir = os.path.join(puppet_ssl_dir, "certs") @@ -134,18 +131,27 @@ class PuppetConstants(object): def _autostart_puppet(log): # Set puppet to automatically start - if os.path.exists('/etc/default/puppet'): - subp.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) - elif os.path.exists('/bin/systemctl'): - subp.subp(['/bin/systemctl', 'enable', 'puppet.service'], - capture=False) - elif os.path.exists('/sbin/chkconfig'): - subp.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) + if os.path.exists("/etc/default/puppet"): + subp.subp( + [ + "sed", + "-i", + "-e", + "s/^START=.*/START=yes/", + "/etc/default/puppet", + ], + capture=False, + ) + elif os.path.exists("/bin/systemctl"): + subp.subp( + ["/bin/systemctl", "enable", "puppet.service"], capture=False + ) + elif os.path.exists("/sbin/chkconfig"): + subp.subp(["/sbin/chkconfig", "puppet", "on"], capture=False) else: - log.warning(("Sorry we do not know how to enable" - " puppet services on this system")) + log.warning( + "Sorry we do not know how to enable puppet services on this system" + ) def get_config_value(puppet_bin, setting): @@ -153,12 +159,13 @@ def get_config_value(puppet_bin, setting): :param puppet_bin: path to puppet binary :param setting: setting to query """ - out, _ = subp.subp([puppet_bin, 'config', 'print', setting]) + out, _ = subp.subp([puppet_bin, "config", "print", setting]) return out.rstrip() -def install_puppet_aio(url=AIO_INSTALL_URL, version=None, - collection=None, cleanup=True): 
+def install_puppet_aio( + url=AIO_INSTALL_URL, version=None, collection=None, cleanup=True +): """Install puppet-agent from the puppetlabs repositories using the one-shot shell script @@ -169,62 +176,70 @@ def install_puppet_aio(url=AIO_INSTALL_URL, version=None, """ args = [] if version is not None: - args = ['-v', version] + args = ["-v", version] if collection is not None: - args += ['-c', collection] + args += ["-c", collection] # Purge puppetlabs repos after installation if cleanup: - args += ['--cleanup'] + args += ["--cleanup"] content = url_helper.readurl(url=url, retries=5).contents # Use tmpdir over tmpfile to avoid 'text file busy' on execute with temp_utils.tempdir(needs_exe=True) as tmpd: - tmpf = os.path.join(tmpd, 'puppet-install') + tmpf = os.path.join(tmpd, "puppet-install") util.write_file(tmpf, content, mode=0o700) return subp.subp([tmpf] + args, capture=False) def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything - if 'puppet' not in cfg: - log.debug(("Skipping module named %s," - " no 'puppet' configuration found"), name) + if "puppet" not in cfg: + log.debug( + "Skipping module named %s, no 'puppet' configuration found", name + ) return - puppet_cfg = cfg['puppet'] + puppet_cfg = cfg["puppet"] # Start by installing the puppet package if necessary... - install = util.get_cfg_option_bool(puppet_cfg, 'install', True) - version = util.get_cfg_option_str(puppet_cfg, 'version', None) - collection = util.get_cfg_option_str(puppet_cfg, 'collection', None) + install = util.get_cfg_option_bool(puppet_cfg, "install", True) + version = util.get_cfg_option_str(puppet_cfg, "version", None) + collection = util.get_cfg_option_str(puppet_cfg, "collection", None) install_type = util.get_cfg_option_str( - puppet_cfg, 'install_type', 'packages') - cleanup = util.get_cfg_option_bool(puppet_cfg, 'cleanup', True) - run = util.get_cfg_option_bool(puppet_cfg, 'exec', default=False) - start_puppetd = util.get_cfg_option_bool(puppet_cfg, - 'start_service', - default=True) + puppet_cfg, "install_type", "packages" + ) + cleanup = util.get_cfg_option_bool(puppet_cfg, "cleanup", True) + run = util.get_cfg_option_bool(puppet_cfg, "exec", default=False) + start_puppetd = util.get_cfg_option_bool( + puppet_cfg, "start_service", default=True + ) aio_install_url = util.get_cfg_option_str( - puppet_cfg, 'aio_install_url', default=AIO_INSTALL_URL) + puppet_cfg, "aio_install_url", default=AIO_INSTALL_URL + ) # AIO and distro packages use different paths - if install_type == 'aio': - puppet_user = 'root' - puppet_bin = '/opt/puppetlabs/bin/puppet' - puppet_package = 'puppet-agent' + if install_type == "aio": + puppet_user = "root" + puppet_bin = "/opt/puppetlabs/bin/puppet" + puppet_package = "puppet-agent" else: # default to 'packages' - puppet_user = 'puppet' - puppet_bin = 'puppet' - puppet_package = 'puppet' + puppet_user = "puppet" + puppet_bin = "puppet" + puppet_package = "puppet" package_name = util.get_cfg_option_str( - puppet_cfg, 'package_name', puppet_package) + puppet_cfg, "package_name", puppet_package + ) if not install and version: - log.warning(("Puppet install set to false but version supplied," - " doing nothing.")) + log.warning( + "Puppet install set to false but version supplied, doing nothing." 
+ ) elif install: - log.debug(("Attempting to install puppet %s from %s"), - version if version else 'latest', install_type) + log.debug( + "Attempting to install puppet %s from %s", + version if version else "latest", + install_type, + ) if install_type == "packages": cloud.distro.install_packages((package_name, version)) @@ -235,17 +250,21 @@ def handle(name, cfg, cloud, log, _args): run = False conf_file = util.get_cfg_option_str( - puppet_cfg, 'conf_file', get_config_value(puppet_bin, 'config')) + puppet_cfg, "conf_file", get_config_value(puppet_bin, "config") + ) ssl_dir = util.get_cfg_option_str( - puppet_cfg, 'ssl_dir', get_config_value(puppet_bin, 'ssldir')) + puppet_cfg, "ssl_dir", get_config_value(puppet_bin, "ssldir") + ) csr_attributes_path = util.get_cfg_option_str( - puppet_cfg, 'csr_attributes_path', - get_config_value(puppet_bin, 'csr_attributes')) + puppet_cfg, + "csr_attributes_path", + get_config_value(puppet_bin, "csr_attributes"), + ) p_constants = PuppetConstants(conf_file, ssl_dir, csr_attributes_path, log) # ... and then update the puppet configuration - if 'conf' in puppet_cfg: + if "conf" in puppet_cfg: # Add all sections from the conf object to puppet.conf contents = util.load_file(p_constants.conf_path) # Create object for reading puppet.conf values @@ -254,30 +273,31 @@ def handle(name, cfg, cloud, log, _args): # mix the rest up. First clean them up # (TODO(harlowja) is this really needed??) cleaned_lines = [i.lstrip() for i in contents.splitlines()] - cleaned_contents = '\n'.join(cleaned_lines) + cleaned_contents = "\n".join(cleaned_lines) # Move to puppet_config.read_file when dropping py2.7 puppet_config.read_file( - StringIO(cleaned_contents), - source=p_constants.conf_path) - for (cfg_name, cfg) in puppet_cfg['conf'].items(): + StringIO(cleaned_contents), source=p_constants.conf_path + ) + for (cfg_name, cfg) in puppet_cfg["conf"].items(): # Cert configuration is a special case # Dump the puppetserver ca certificate in the correct place - if cfg_name == 'ca_cert': + if cfg_name == "ca_cert": # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership util.ensure_dir(p_constants.ssl_dir, 0o771) - util.chownbyname(p_constants.ssl_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_dir, puppet_user, "root") util.ensure_dir(p_constants.ssl_cert_dir) - util.chownbyname(p_constants.ssl_cert_dir, puppet_user, 'root') + util.chownbyname(p_constants.ssl_cert_dir, puppet_user, "root") util.write_file(p_constants.ssl_cert_path, cfg) - util.chownbyname(p_constants.ssl_cert_path, - puppet_user, 'root') + util.chownbyname( + p_constants.ssl_cert_path, puppet_user, "root" + ) else: # Iterate through the config items, we'll use ConfigParser.set # to overwrite or create new items as needed for (o, v) in cfg.items(): - if o == 'certname': + if o == "certname": # Expand %f as the fqdn # TODO(harlowja) should this use the cloud fqdn?? 
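Pulling the cc_phone_home hunk earlier together: keys named in post (or all of POST_LIST_ALL) are collected, unavailable ones are posted as "N/A", and the url may be parameterized with INSTANCE_ID. The sketch below uses hypothetical values and approximates templater.render_string with str.replace; the $INSTANCE_ID spelling follows cloud-init's basic template syntax and should be treated as an assumption here.

# Hypothetical collected keys; a missing key ends up as None.
all_keys = {"instance_id": "i-0123", "hostname": "node1", "pub_key_rsa": None}
post_list = ["instance_id", "hostname", "pub_key_rsa"]

submit_keys = {k: all_keys.get(k) for k in post_list}
real_submit_keys = {
    k: "N/A" if v is None else str(v) for (k, v) in submit_keys.items()
}

# Expand the parameterized url before POSTing, as the module does.
url = "http://example.com/$INSTANCE_ID/"
url = url.replace("$INSTANCE_ID", all_keys["instance_id"])
print(url, real_submit_keys)
# http://example.com/i-0123/
# {'instance_id': 'i-0123', 'hostname': 'node1', 'pub_key_rsa': 'N/A'}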
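On FreeBSD, givecmdline() in the cc_power_state_change hunk earlier extracts the command line from procstat -c output with a regular expression. Here is that exact pattern applied to a made-up procstat row (the row mirrors the "1 init /bin/init --" example in the module's own comment):

import re

line = "    1 init             /bin/init --"  # hypothetical 'procstat -c' row
m = re.search(r"\d+ (\w|\.|-)+\s+(/\w.+)", line)
print(m.group(2))  # /bin/init --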
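install_puppet_aio() above hands version, collection and cleanup to puppetlabs' one-shot install script as -v, -c and --cleanup. The argument assembly in isolation, with hypothetical values:

# Mirrors the args-building in install_puppet_aio(); values are made up.
version, collection, cleanup = "7.12.0", "puppet7", True

args = []
if version is not None:
    args = ["-v", version]
if collection is not None:
    args += ["-c", collection]
if cleanup:
    args += ["--cleanup"]
print(args)  # ['-v', '7.12.0', '-c', 'puppet7', '--cleanup']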
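A standalone look at the %f-to-FQDN substitution performed on the certname line just below (the certname value here is hypothetical):

import socket

certname = "%f"  # e.g. a user-supplied certname containing the %f token
print(certname.replace("%f", socket.getfqdn()))  # -> this host's FQDN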
v = v.replace("%f", socket.getfqdn()) @@ -288,14 +308,16 @@ def handle(name, cfg, cloud, log, _args): puppet_config.set(cfg_name, o, v) # We got all our config as wanted we'll rename # the previous puppet.conf and create our new one - util.rename(p_constants.conf_path, "%s.old" - % (p_constants.conf_path)) + util.rename( + p_constants.conf_path, "%s.old" % (p_constants.conf_path) + ) util.write_file(p_constants.conf_path, puppet_config.stringify()) - if 'csr_attributes' in puppet_cfg: - util.write_file(p_constants.csr_attributes_path, - yaml.dump(puppet_cfg['csr_attributes'], - default_flow_style=False)) + if "csr_attributes" in puppet_cfg: + util.write_file( + p_constants.csr_attributes_path, + yaml.dump(puppet_cfg["csr_attributes"], default_flow_style=False), + ) # Set it up so it autostarts if start_puppetd: @@ -303,18 +325,21 @@ def handle(name, cfg, cloud, log, _args): # Run the agent if needed if run: - log.debug('Running puppet-agent') - cmd = [puppet_bin, 'agent'] - if 'exec_args' in puppet_cfg: - cmd_args = puppet_cfg['exec_args'] + log.debug("Running puppet-agent") + cmd = [puppet_bin, "agent"] + if "exec_args" in puppet_cfg: + cmd_args = puppet_cfg["exec_args"] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) elif isinstance(cmd_args, str): cmd.extend(cmd_args.split()) else: - log.warning("Unknown type %s provided for puppet" - " 'exec_args' expected list, tuple," - " or string", type(cmd_args)) + log.warning( + "Unknown type %s provided for puppet" + " 'exec_args' expected list, tuple," + " or string", + type(cmd_args), + ) cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) else: cmd.extend(PUPPET_AGENT_DEFAULT_ARGS) @@ -322,6 +347,7 @@ def handle(name, cfg, cloud, log, _args): if start_puppetd: # Start puppetd - subp.subp(['service', 'puppet', 'start'], capture=False) + subp.subp(["service", "puppet", "start"], capture=False) + # vi: ts=4 expandtab diff --git a/cloudinit/config/cc_refresh_rmc_and_interface.py b/cloudinit/config/cc_refresh_rmc_and_interface.py index d5e0ecb2..87be5348 100644 --- a/cloudinit/config/cc_refresh_rmc_and_interface.py +++ b/cloudinit/config/cc_refresh_rmc_and_interface.py @@ -34,20 +34,18 @@ This module handles """ +import errno + from cloudinit import log as logging +from cloudinit import netinfo, subp, util from cloudinit.settings import PER_ALWAYS -from cloudinit import util -from cloudinit import subp -from cloudinit import netinfo - -import errno frequency = PER_ALWAYS LOG = logging.getLogger(__name__) # Ensure that /opt/rsct/bin has been added to standard PATH of the # distro. The symlink to rmcctrl is /usr/sbin/rsct/bin/rmcctrl . -RMCCTRL = 'rmcctrl' +RMCCTRL = "rmcctrl" def handle(name, _cfg, _cloud, _log, _args): @@ -56,10 +54,11 @@ def handle(name, _cfg, _cloud, _log, _args): return LOG.debug( - 'Making the IPv6 up explicitly. ' - 'Ensuring IPv6 interface is not being handled by NetworkManager ' - 'and it is restarted to re-establish the communication with ' - 'the hypervisor') + "Making the IPv6 up explicitly. 
" + "Ensuring IPv6 interface is not being handled by NetworkManager " + "and it is restarted to re-establish the communication with " + "the hypervisor" + ) ifaces = find_ipv6_ifaces() @@ -80,7 +79,7 @@ def find_ipv6_ifaces(): ifaces = [] for iface, data in info.items(): if iface == "lo": - LOG.debug('Skipping localhost interface') + LOG.debug("Skipping localhost interface") if len(data.get("ipv4", [])) != 0: # skip this interface, as it has ipv4 addrs continue @@ -92,16 +91,16 @@ def refresh_ipv6(interface): # IPv6 interface is explicitly brought up, subsequent to which the # RMC services are restarted to re-establish the communication with # the hypervisor. - subp.subp(['ip', 'link', 'set', interface, 'down']) - subp.subp(['ip', 'link', 'set', interface, 'up']) + subp.subp(["ip", "link", "set", interface, "down"]) + subp.subp(["ip", "link", "set", interface, "up"]) def sysconfig_path(iface): - return '/etc/sysconfig/network-scripts/ifcfg-' + iface + return "/etc/sysconfig/network-scripts/ifcfg-" + iface def restart_network_manager(): - subp.subp(['systemctl', 'restart', 'NetworkManager']) + subp.subp(["systemctl", "restart", "NetworkManager"]) def disable_ipv6(iface_file): @@ -113,12 +112,11 @@ def disable_ipv6(iface_file): contents = util.load_file(iface_file) except IOError as e: if e.errno == errno.ENOENT: - LOG.debug("IPv6 interface file %s does not exist\n", - iface_file) + LOG.debug("IPv6 interface file %s does not exist\n", iface_file) else: raise e - if 'IPV6INIT' not in contents: + if "IPV6INIT" not in contents: LOG.debug("Interface file %s did not have IPV6INIT", iface_file) return @@ -135,11 +133,12 @@ def disable_ipv6(iface_file): def search(contents): # Search for any NM_CONTROLLED or IPV6 lines in IPv6 interface file. - return( - contents.startswith("IPV6ADDR") or - contents.startswith("IPADDR6") or - contents.startswith("IPV6INIT") or - contents.startswith("NM_CONTROLLED")) + return ( + contents.startswith("IPV6ADDR") + or contents.startswith("IPADDR6") + or contents.startswith("IPV6INIT") + or contents.startswith("NM_CONTROLLED") + ) def refresh_rmc(): @@ -152,8 +151,8 @@ def refresh_rmc(): # until the subsystem and all resource managers are stopped. # -s : start Resource Monitoring & Control subsystem. try: - subp.subp([RMCCTRL, '-z']) - subp.subp([RMCCTRL, '-s']) + subp.subp([RMCCTRL, "-z"]) + subp.subp([RMCCTRL, "-s"]) except Exception: - util.logexc(LOG, 'Failed to refresh the RMC subsystem.') + util.logexc(LOG, "Failed to refresh the RMC subsystem.") raise diff --git a/cloudinit/config/cc_reset_rmc.py b/cloudinit/config/cc_reset_rmc.py index 1cd72774..3b929903 100644 --- a/cloudinit/config/cc_reset_rmc.py +++ b/cloudinit/config/cc_reset_rmc.py @@ -39,9 +39,8 @@ Prerequisite of using this module is to install RSCT packages. import os from cloudinit import log as logging +from cloudinit import subp, util from cloudinit.settings import PER_INSTANCE -from cloudinit import util -from cloudinit import subp frequency = PER_INSTANCE @@ -49,34 +48,34 @@ frequency = PER_INSTANCE # The symlink for RMCCTRL and RECFGCT are # /usr/sbin/rsct/bin/rmcctrl and # /usr/sbin/rsct/install/bin/recfgct respectively. 
-RSCT_PATH = '/opt/rsct/install/bin' -RMCCTRL = 'rmcctrl' -RECFGCT = 'recfgct' +RSCT_PATH = "/opt/rsct/install/bin" +RMCCTRL = "rmcctrl" +RECFGCT = "recfgct" LOG = logging.getLogger(__name__) -NODE_ID_FILE = '/etc/ct_node_id' +NODE_ID_FILE = "/etc/ct_node_id" def handle(name, _cfg, cloud, _log, _args): # Ensuring node id has to be generated only once during first boot - if cloud.datasource.platform_type == 'none': - LOG.debug('Skipping creation of new ct_node_id node') + if cloud.datasource.platform_type == "none": + LOG.debug("Skipping creation of new ct_node_id node") return if not os.path.isdir(RSCT_PATH): LOG.debug("module disabled, RSCT_PATH not present") return - orig_path = os.environ.get('PATH') + orig_path = os.environ.get("PATH") try: add_path(orig_path) reset_rmc() finally: if orig_path: - os.environ['PATH'] = orig_path + os.environ["PATH"] = orig_path else: - del os.environ['PATH'] + del os.environ["PATH"] def reconfigure_rsct_subsystems(): @@ -88,17 +87,17 @@ def reconfigure_rsct_subsystems(): LOG.debug(out.strip()) return out except subp.ProcessExecutionError: - util.logexc(LOG, 'Failed to reconfigure the RSCT subsystems.') + util.logexc(LOG, "Failed to reconfigure the RSCT subsystems.") raise def get_node_id(): try: fp = util.load_file(NODE_ID_FILE) - node_id = fp.split('\n')[0] + node_id = fp.split("\n")[0] return node_id except Exception: - util.logexc(LOG, 'Failed to get node ID from file %s.' % NODE_ID_FILE) + util.logexc(LOG, "Failed to get node ID from file %s." % NODE_ID_FILE) raise @@ -107,25 +106,25 @@ def add_path(orig_path): # So thet cloud init automatically find and # run RECFGCT to create new node_id. suff = ":" + orig_path if orig_path else "" - os.environ['PATH'] = RSCT_PATH + suff - return os.environ['PATH'] + os.environ["PATH"] = RSCT_PATH + suff + return os.environ["PATH"] def rmcctrl(): # Stop the RMC subsystem and all resource managers so that we can make # some changes to it try: - return subp.subp([RMCCTRL, '-z']) + return subp.subp([RMCCTRL, "-z"]) except Exception: - util.logexc(LOG, 'Failed to stop the RMC subsystem.') + util.logexc(LOG, "Failed to stop the RMC subsystem.") raise def reset_rmc(): - LOG.debug('Attempting to reset RMC.') + LOG.debug("Attempting to reset RMC.") node_id_before = get_node_id() - LOG.debug('Node ID at beginning of module: %s', node_id_before) + LOG.debug("Node ID at beginning of module: %s", node_id_before) # Stop the RMC subsystem and all resource managers so that we can make # some changes to it @@ -133,11 +132,11 @@ def reset_rmc(): reconfigure_rsct_subsystems() node_id_after = get_node_id() - LOG.debug('Node ID at end of module: %s', node_id_after) + LOG.debug("Node ID at end of module: %s", node_id_after) # Check if new node ID is generated or not # by comparing old and new node ID if node_id_after == node_id_before: - msg = 'New node ID did not get generated.' + msg = "New node ID did not get generated." 
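add_path() above prepends RSCT_PATH so that recfgct and rmcctrl resolve from PATH without absolute paths, while handle() restores the original PATH afterwards. A minimal standalone equivalent of the prepend:

import os

RSCT_PATH = "/opt/rsct/install/bin"
orig_path = os.environ.get("PATH")
suff = ":" + orig_path if orig_path else ""
os.environ["PATH"] = RSCT_PATH + suff
print(os.environ["PATH"].split(":")[0])  # /opt/rsct/install/bin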
LOG.error(msg) raise Exception(msg) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 00bb7ae7..b009c392 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -13,21 +13,21 @@ import os import stat from textwrap import dedent +from cloudinit import subp, util from cloudinit.config.schema import get_meta_doc, validate_cloudconfig_schema from cloudinit.settings import PER_ALWAYS -from cloudinit import subp -from cloudinit import util NOBLOCK = "noblock" frequency = PER_ALWAYS -distros = ['all'] +distros = ["all"] meta = { - 'id': 'cc_resizefs', - 'name': 'Resizefs', - 'title': 'Resize filesystem', - 'description': dedent("""\ + "id": "cc_resizefs", + "name": "Resizefs", + "title": "Resize filesystem", + "description": dedent( + """\ Resize a filesystem to use all avaliable space on partition. This module is useful along with ``cc_growpart`` and will ensure that if the root partition has been resized the root filesystem will be resized @@ -36,22 +36,26 @@ meta = { running. Optionally, the resize operation can be performed in the background while cloud-init continues running modules. This can be enabled by setting ``resize_rootfs`` to ``true``. This module can be - disabled altogether by setting ``resize_rootfs`` to ``false``."""), - 'distros': distros, - 'examples': [ - 'resize_rootfs: false # disable root filesystem resize operation'], - 'frequency': PER_ALWAYS, + disabled altogether by setting ``resize_rootfs`` to ``false``.""" + ), + "distros": distros, + "examples": [ + "resize_rootfs: false # disable root filesystem resize operation" + ], + "frequency": PER_ALWAYS, } schema = { - 'type': 'object', - 'properties': { - 'resize_rootfs': { - 'enum': [True, False, NOBLOCK], - 'description': dedent("""\ - Whether to resize the root partition. Default: 'true'""") + "type": "object", + "properties": { + "resize_rootfs": { + "enum": [True, False, NOBLOCK], + "description": dedent( + """\ + Whether to resize the root partition. Default: 'true'""" + ), } - } + }, } __doc__ = get_meta_doc(meta, schema) # Supplement python help() @@ -63,32 +67,38 @@ def _resize_btrfs(mount_point, devpth): # Use a subvolume that is not ro to trick the resize operation to do the # "right" thing. The use of ".snapshot" is specific to "snapper" a generic # solution would be walk the subvolumes and find a rw mounted subvolume. 
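The cc_resizefs schema above constrains resize_rootfs to exactly three values: true, false, or "noblock" (per the NOBLOCK constant, the value that lets the resize proceed without blocking boot). Real validation goes through validate_cloudconfig_schema; the toy check below only mirrors the enum.

NOBLOCK = "noblock"

def resize_rootfs_is_valid(value):
    # Mirrors the schema enum [True, False, NOBLOCK].
    return value in (True, False, NOBLOCK)

print(resize_rootfs_is_valid("noblock"), resize_rootfs_is_valid("yes"))
# True False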
- if (not util.mount_is_read_write(mount_point) and - os.path.isdir("%s/.snapshots" % mount_point)): - return ('btrfs', 'filesystem', 'resize', 'max', - '%s/.snapshots' % mount_point) + if not util.mount_is_read_write(mount_point) and os.path.isdir( + "%s/.snapshots" % mount_point + ): + return ( + "btrfs", + "filesystem", + "resize", + "max", + "%s/.snapshots" % mount_point, + ) else: - return ('btrfs', 'filesystem', 'resize', 'max', mount_point) + return ("btrfs", "filesystem", "resize", "max", mount_point) def _resize_ext(mount_point, devpth): - return ('resize2fs', devpth) + return ("resize2fs", devpth) def _resize_xfs(mount_point, devpth): - return ('xfs_growfs', mount_point) + return ("xfs_growfs", mount_point) def _resize_ufs(mount_point, devpth): - return ('growfs', '-y', mount_point) + return ("growfs", "-y", mount_point) def _resize_zfs(mount_point, devpth): - return ('zpool', 'online', '-e', mount_point, devpth) + return ("zpool", "online", "-e", mount_point, devpth) def _resize_hammer2(mount_point, devpth): - return ('hammer2', 'growfs', mount_point) + return ("hammer2", "growfs", mount_point) def _can_skip_resize_ufs(mount_point, devpth): @@ -100,7 +110,7 @@ def _can_skip_resize_ufs(mount_point, devpth): # growfs exits with 1 for almost all cases up to this one. # This means we can't just use rcs=[0, 1] as subp parameter: try: - subp.subp(['growfs', '-N', devpth]) + subp.subp(["growfs", "-N", devpth]) except subp.ProcessExecutionError as e: if e.stderr.startswith(skip_start) and skip_contain in e.stderr: # This FS is already at the desired size @@ -114,17 +124,15 @@ def _can_skip_resize_ufs(mount_point, devpth): # for multiple filesystem types if possible, e.g. one command for # ext2, ext3 and ext4. RESIZE_FS_PREFIXES_CMDS = [ - ('btrfs', _resize_btrfs), - ('ext', _resize_ext), - ('xfs', _resize_xfs), - ('ufs', _resize_ufs), - ('zfs', _resize_zfs), - ('hammer2', _resize_hammer2), + ("btrfs", _resize_btrfs), + ("ext", _resize_ext), + ("xfs", _resize_xfs), + ("ufs", _resize_ufs), + ("zfs", _resize_zfs), + ("hammer2", _resize_hammer2), ] -RESIZE_FS_PRECHECK_CMDS = { - 'ufs': _can_skip_resize_ufs -} +RESIZE_FS_PRECHECK_CMDS = {"ufs": _can_skip_resize_ufs} def can_skip_resize(fs_type, resize_what, devpth): @@ -148,52 +156,66 @@ def maybe_get_writable_device_path(devpath, info, log): container = util.is_container() # Ensure the path is a block device. - if (devpath == "/dev/root" and not os.path.exists(devpath) and - not container): + if ( + devpath == "/dev/root" + and not os.path.exists(devpath) + and not container + ): devpath = util.rootdev_from_cmdline(util.get_cmdline()) if devpath is None: log.warning("Unable to find device '/dev/root'") return None log.debug("Converted /dev/root to '%s' per kernel cmdline", devpath) - if devpath == 'overlayroot': + if devpath == "overlayroot": log.debug("Not attempting to resize devpath '%s': %s", devpath, info) return None # FreeBSD zpool can also just use gpt/
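RESIZE_FS_PREFIXES_CMDS above keys each command builder on a filesystem-type prefix, so a single "ext" entry covers ext2, ext3 and ext4. The lookup that consumes the table falls outside this excerpt, so the dispatcher below is an assumption about its shape, using trimmed copies of the real builders.

def _resize_ext(mount_point, devpth):
    return ("resize2fs", devpth)

def _resize_xfs(mount_point, devpth):
    return ("xfs_growfs", mount_point)

RESIZE_FS_PREFIXES_CMDS = [("ext", _resize_ext), ("xfs", _resize_xfs)]

def resize_cmd(fs_type, resize_what, devpth):
    # Assumed prefix dispatch: first matching prefix wins.
    for prefix, builder in RESIZE_FS_PREFIXES_CMDS:
        if fs_type.startswith(prefix):
            return builder(resize_what, devpth)
    return None

print(resize_cmd("ext4", "/", "/dev/vda1"))  # ('resize2fs', '/dev/vda1')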