From 493f6c3e923902d5d4f3d87e1cc4c726ea90ada4 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 11 Apr 2017 10:00:12 -0600 Subject: DigitalOcean: bind resolvers to loopback interface. This change makes the DigitalOcean datasource consistent with OpenStack and Joyent by binding the resolver addresses to the loopback interface. This _is_ a work-around to bug 1675571. Part of bug 1676908. --- cloudinit/sources/helpers/digitalocean.py | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index 72f7bde4..6423c8ef 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -107,15 +107,12 @@ def convert_network_configuration(config, dns_servers): } """ - def _get_subnet_part(pcfg, nameservers=None): + def _get_subnet_part(pcfg): subpart = {'type': 'static', 'control': 'auto', 'address': pcfg.get('ip_address'), 'gateway': pcfg.get('gateway')} - if nameservers: - subpart['dns_nameservers'] = nameservers - if ":" in pcfg.get('ip_address'): subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'), pcfg.get('cidr')) @@ -157,13 +154,8 @@ def convert_network_configuration(config, dns_servers): continue sub_part = _get_subnet_part(raw_subnet) - if nic_type == 'public' and 'anchor' not in netdef: - # add DNS resolvers to the public interfaces only - sub_part = _get_subnet_part(raw_subnet, dns_servers) - else: - # remove the gateway any non-public interfaces - if 'gateway' in sub_part: - del sub_part['gateway'] + if netdef in ('private', 'anchor_ipv4', 'anchor_ipv6'): + del sub_part['gateway'] subnets.append(sub_part) @@ -171,6 +163,10 @@ def convert_network_configuration(config, dns_servers): nic_configs.append(ncfg) LOG.debug("nic '%s' configuration: %s", if_name, ncfg) + if dns_servers: + LOG.debug("added dns servers: %s", dns_servers) + nic_configs.append({'type': 'nameserver', 'address': dns_servers}) + return {'version': 1, 'config': nic_configs} -- cgit v1.2.3 From 721348a622a660b65acfdf7fdf53203b47f80748 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Mon, 10 Apr 2017 15:52:37 -0400 Subject: util: teach write_file about copy_mode option On centos/fedora/rhel/derivatives, /etc/ssh/sshd_config has mode 0600, but cloud-init unilaterally sets file modes to 0644 when no explicit mode is passed to util.write_file. On ubuntu/debian, this file has mode 0644. With this patch, write_file learns about the copy_mode option, which will cause it to use the mode of the existing file by default, falling back to the explicit mode parameter if the file does not exist. 
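A minimal standalone sketch of that mode-selection step, assuming only the stdlib calls the patch itself uses (os.stat and stat.S_IMODE); the helper name _resolve_mode is illustrative and not part of cloud-init:

    import os
    import stat

    _DEF_PERMS = 0o644

    def _resolve_mode(filename, mode=_DEF_PERMS, copy_mode=False):
        # With copy_mode set, prefer the permission bits of an
        # existing file; OSError means no such file yet, so keep
        # the explicit (or default) mode instead.
        if copy_mode:
            try:
                file_stat = os.stat(filename)
                mode = stat.S_IMODE(file_stat.st_mode)
            except OSError:
                pass
        return mode

On a CentOS-style host where /etc/ssh/sshd_config already exists with mode 0600 this resolves to 0o600, while a not-yet-existing path resolves to the 0o644 default, which is exactly the fallback described above.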
LP: #1644064 Resolves: rhbz#1295984 --- cloudinit/atomic_helper.py | 12 +++++++++++- cloudinit/config/cc_set_passwords.py | 3 ++- cloudinit/util.py | 10 +++++++++- tests/unittests/test_util.py | 33 +++++++++++++++++++++++++++++++-- 4 files changed, 53 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/atomic_helper.py b/cloudinit/atomic_helper.py index fb2df8d5..587b9945 100644 --- a/cloudinit/atomic_helper.py +++ b/cloudinit/atomic_helper.py @@ -2,13 +2,23 @@ import json import os +import stat import tempfile _DEF_PERMS = 0o644 -def write_file(filename, content, mode=_DEF_PERMS, omode="wb"): +def write_file(filename, content, mode=_DEF_PERMS, + omode="wb", copy_mode=False): # open filename in mode 'omode', write content, set permissions to 'mode' + + if copy_mode: + try: + file_stat = os.stat(filename) + mode = stat.S_IMODE(file_stat.st_mode) + except OSError: + pass + tf = None try: tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(filename), diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index eb0bdab0..bb24d57f 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -215,7 +215,8 @@ def handle(_name, cfg, cloud, log, args): pw_auth)) lines = [str(l) for l in new_lines] - util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines)) + util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines), + copy_mode=True) try: cmd = cloud.distro.init_cmd # Default service diff --git a/cloudinit/util.py b/cloudinit/util.py index 17abdf81..6940850c 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1688,7 +1688,7 @@ def chmod(path, mode): os.chmod(path, real_mode) -def write_file(filename, content, mode=0o644, omode="wb"): +def write_file(filename, content, mode=0o644, omode="wb", copy_mode=False): """ Writes a file with the given content and sets the file mode as specified. Resotres the SELinux context if possible. @@ -1698,6 +1698,14 @@ def write_file(filename, content, mode=0o644, omode="wb"): @param mode: The filesystem mode to set on the file. @param omode: The open mode used when opening the file (w, wb, a, etc.) 
""" + + if copy_mode: + try: + file_stat = os.stat(filename) + mode = stat.S_IMODE(file_stat.st_mode) + except OSError: + pass + ensure_dir(os.path.dirname(filename)) if 'b' in omode.lower(): content = encode_text(content) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index ab74311e..5d21b4b7 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -103,8 +103,8 @@ class TestWriteFile(helpers.TestCase): self.assertTrue(os.path.isdir(dirname)) self.assertTrue(os.path.isfile(path)) - def test_custom_mode(self): - """Verify custom mode works properly.""" + def test_explicit_mode(self): + """Verify explicit file mode works properly.""" path = os.path.join(self.tmp, "NewFile.txt") contents = "Hey there" @@ -115,6 +115,35 @@ class TestWriteFile(helpers.TestCase): file_stat = os.stat(path) self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) + def test_copy_mode_no_existing(self): + """Verify that file is created with mode 0o644 if copy_mode + is true and there is no prior existing file.""" + path = os.path.join(self.tmp, "NewFile.txt") + contents = "Hey there" + + util.write_file(path, contents, copy_mode=True) + + self.assertTrue(os.path.exists(path)) + self.assertTrue(os.path.isfile(path)) + file_stat = os.stat(path) + self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode)) + + def test_copy_mode_with_existing(self): + """Verify that file is created using mode of existing file + if copy_mode is true.""" + path = os.path.join(self.tmp, "NewFile.txt") + contents = "Hey there" + + open(path, 'w').close() + os.chmod(path, 0o666) + + util.write_file(path, contents, copy_mode=True) + + self.assertTrue(os.path.exists(path)) + self.assertTrue(os.path.isfile(path)) + file_stat = os.stat(path) + self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode)) + def test_custom_omode(self): """Verify custom omode works properly.""" path = os.path.join(self.tmp, "NewFile.txt") -- cgit v1.2.3 From aab609104988b9d14572eb8fa1dd28a8156c0aea Mon Sep 17 00:00:00 2001 From: Jon Grimm Date: Mon, 3 Apr 2017 11:52:42 -0500 Subject: Remove (and/or fix) URL shortener references Several references that were using URL shorteners are now broken due to their service going away, making it painful to even figure out what they were supposed to be pointing at. Put back long URLS using '# noqa' to make flake8 happy. 
LP: #1669727 --- cloudinit/config/cc_yum_add_repo.py | 2 +- cloudinit/distros/debian.py | 3 +-- cloudinit/distros/parsers/hosts.py | 4 ++-- cloudinit/distros/rhel.py | 4 ++-- cloudinit/sources/DataSourceCloudStack.py | 2 +- cloudinit/sources/helpers/openstack.py | 2 +- 6 files changed, 8 insertions(+), 9 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index ef8535ed..a04e1b2a 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -52,7 +52,7 @@ def _format_repo_value(val): return str(int(val)) if isinstance(val, (list, tuple)): # Can handle 'lists' in certain cases - # See: http://bit.ly/Qqrf1t + # See: https://linux.die.net/man/5/yum.conf return "\n ".join([_format_repo_value(v) for v in val]) if not isinstance(val, six.string_types): return str(val) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 3f0f9d53..16f8d955 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -143,8 +143,7 @@ class Distro(distros.Distro): pkgs = [] e = os.environ.copy() - # See: http://tiny.cc/kg91fw - # Or: http://tiny.cc/mh91fw + # See: http://manpages.ubuntu.com/manpages/xenial/man7/debconf.7.html e['DEBIAN_FRONTEND'] = 'noninteractive' wcfg = self.get_option("apt_get_wrapper", APT_GET_WRAPPER) diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py index 87f164be..64444581 100644 --- a/cloudinit/distros/parsers/hosts.py +++ b/cloudinit/distros/parsers/hosts.py @@ -10,8 +10,8 @@ from cloudinit.distros.parsers import chop_comment # See: man hosts -# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts -# or http://tinyurl.com/6lmox3 +# or https://linux.die.net/man/5/hosts +# or https://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/configtuning-configfiles.html # noqa class HostsConf(object): def __init__(self, text): self._text = text diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 372c7d0f..1fecb619 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -28,7 +28,7 @@ def _make_sysconfig_bool(val): class Distro(distros.Distro): - # See: http://tiny.cc/6r99fw + # See: https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Networking_Guide/sec-Network_Configuration_Using_sysconfig_Files.html # noqa clock_conf_fn = "/etc/sysconfig/clock" locale_conf_fn = '/etc/sysconfig/i18n' systemd_locale_conf_fn = '/etc/locale.conf' @@ -130,8 +130,8 @@ class Distro(distros.Distro): rhel_util.update_sysconfig_file(out_fn, host_cfg) def _select_hostname(self, hostname, fqdn): - # See: http://bit.ly/TwitgL # Should be fqdn if we can use it + # See: https://www.centos.org/docs/5/html/Deployment_Guide-en-US/ch-sysconfig.html#s2-sysconfig-network # noqa if fqdn: return fqdn return hostname diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index b0ab275c..ceef0282 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -207,8 +207,8 @@ def get_latest_lease(): def get_vr_address(): # Get the address of the virtual router via dhcp leases - # see http://bit.ly/T76eKC for documentation on the virtual router. # If no virtual router is detected, fallback on default gateway. 
+ # See http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/virtual_machines/user-data.html # noqa lease_file = get_latest_lease() if not lease_file: LOG.debug("No lease file found, using default gateway") diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 61cd36bd..26f3168d 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -21,7 +21,7 @@ from cloudinit import sources from cloudinit import url_helper from cloudinit import util -# For reference: http://tinyurl.com/laora4c +# See https://docs.openstack.org/user-guide/cli-config-drive.html LOG = logging.getLogger(__name__) -- cgit v1.2.3 From ff44056771416cb811879b13b97f88d8f2057071 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 11 Apr 2017 16:01:03 -0600 Subject: DigitalOcean: configure all NICs presented in meta-data. Instead of only configuring 'public' and 'private' interfaces, we want to configure any that has been defined in the meta-data. For legacy reasons, the 'public' and 'private' interfaces are maintained as 'eth0' and 'eth1' respectively. This is part of bug 1676908 for general DigitalOcean datasource fixups. --- cloudinit/sources/helpers/digitalocean.py | 32 +++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index 6423c8ef..8a19c3bd 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -121,27 +121,31 @@ def convert_network_configuration(config, dns_servers): return subpart - all_nics = [] - for k in ('public', 'private'): - if k in config: - all_nics.extend(config[k]) - - macs_to_nics = cloudnet.get_interfaces_by_mac() nic_configs = [] + macs_to_nics = cloudnet.get_interfaces_by_mac() + LOG.debug("nic mapping: %s", macs_to_nics) - for nic in all_nics: + for n in config: + nic = config[n][0] + LOG.debug("considering %s", nic) mac_address = nic.get('mac') + if mac_address not in macs_to_nics: + raise RuntimeError("Did not find network interface on system " + "with mac '%s'. Cannot apply configuration: %s" + % (mac_address, nic)) + sysfs_name = macs_to_nics.get(mac_address) nic_type = nic.get('type', 'unknown') - # Note: the entry 'public' above contains a list, but - # the list will only ever have one nic inside it per digital ocean. - # If it ever had more than one nic, then this code would - # assign all 'public' the same name. - if_name = NIC_MAP.get(nic_type, sysfs_name) - LOG.debug("mapped %s interface to %s, assigning name of %s", - mac_address, sysfs_name, if_name) + if_name = NIC_MAP.get(nic_type, sysfs_name) + if if_name != sysfs_name: + LOG.debug("Found %s interface '%s' on '%s', assigned name of '%s'", + nic_type, mac_address, sysfs_name, if_name) + else: + msg = ("Found interface '%s' on '%s', which is not a public " + "or private interface. Using default system naming.") + LOG.debug(msg, mac_address, sysfs_name) ncfg = {'type': 'physical', 'mac_address': mac_address, -- cgit v1.2.3 From dad97585be0f30202a5a351800f20d4432b94694 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 11 Apr 2017 12:38:11 -0600 Subject: DigitalOcean: assign IPv4ll address to lowest indexed interface. Previously the IPv4LL address for metadata discovery was assigned to the first interfaces from an alphabetic sort. On DigitalOcean, the metadata is only accessible from the first interface. 
This fixes a problem where the IPv4LL address is bound to the wrong interface with snapshots. This is part of general improvements to the DigitalOcean Datasource in bug 1676908. --- cloudinit/sources/helpers/digitalocean.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index 8a19c3bd..257989e8 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -23,11 +23,8 @@ def assign_ipv4_link_local(nic=None): """ if not nic: - for cdev in sorted(cloudnet.get_devicelist()): - if cloudnet.is_physical(cdev): - nic = cdev - LOG.debug("assigned nic '%s' for link-local discovery", nic) - break + nic = get_link_local_nic() + LOG.debug("selected interface '%s' for reading metadata", nic) if not nic: raise RuntimeError("unable to find interfaces to access the" @@ -57,6 +54,13 @@ def assign_ipv4_link_local(nic=None): return nic +def get_link_local_nic(): + nics = [f for f in cloudnet.get_devicelist() if cloudnet.is_physical(f)] + if not nics: + return None + return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex')) + + def del_ipv4_link_local(nic=None): """Remove the ip4LL address. While this is not necessary, the ip4LL address is extraneous and confusing to users. -- cgit v1.2.3 From 61d05d1a2c22cfed646fab2a0b41652aceea3d1e Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Tue, 28 Mar 2017 17:12:31 -0500 Subject: doc: Add missing doc link to snap-config module. --- cloudinit/config/cc_snap_config.py | 4 ++-- doc/rtd/topics/modules.rst | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py index db511661..fe0cc73e 100644 --- a/cloudinit/config/cc_snap_config.py +++ b/cloudinit/config/cc_snap_config.py @@ -5,8 +5,8 @@ # This file is part of cloud-init. See LICENSE file for license information. """ -Snappy ------- +Snap Config +----------- **Summary:** snap_config modules allows configuration of snapd. This module uses the same ``snappy`` namespace for configuration but diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst index a3ead4f1..c963c09a 100644 --- a/doc/rtd/topics/modules.rst +++ b/doc/rtd/topics/modules.rst @@ -44,6 +44,7 @@ Modules .. automodule:: cloudinit.config.cc_set_hostname .. automodule:: cloudinit.config.cc_set_passwords .. automodule:: cloudinit.config.cc_snappy +.. automodule:: cloudinit.config.cc_snap_config .. automodule:: cloudinit.config.cc_spacewalk .. automodule:: cloudinit.config.cc_ssh .. automodule:: cloudinit.config.cc_ssh_authkey_fingerprints -- cgit v1.2.3 From df4ca453520342a0541ab9202305858bf39d4f48 Mon Sep 17 00:00:00 2001 From: Dimitri John Ledkov Date: Thu, 20 Apr 2017 11:08:48 +0100 Subject: net: kernel lies about vlans not stealing mac addresses, when they do Introduce is_vlan function and call that when building dictionary of interfaces by mac address. 
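For illustration, a self-contained sketch of the same check, assuming direct sysfs access (the patch itself reads the file through cloud-init's read_sys_net_safe helper rather than open()):

    def is_vlan(devname):
        # A vlan device such as bond1.101 reports its parent's MAC
        # address, but its sysfs uevent file carries DEVTYPE=vlan,
        # which is what lets us exclude it when building the
        # MAC-to-interface mapping.
        try:
            with open('/sys/class/net/%s/uevent' % devname) as f:
                return 'DEVTYPE=vlan' in f.read().splitlines()
        except (IOError, OSError):
            return False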
LP: #1682871 --- cloudinit/net/__init__.py | 7 +++++++ tests/unittests/test_net.py | 30 +++++++++++++++++++++++++++--- 2 files changed, 34 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 346be5d3..a072a8d6 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -86,6 +86,11 @@ def is_bridge(devname): return os.path.exists(sys_dev_path(devname, "bridge")) +def is_vlan(devname): + uevent = str(read_sys_net_safe(devname, "uevent")) + return 'DEVTYPE=vlan' in uevent.splitlines() + + def is_connected(devname): # is_connected isn't really as simple as that. 2 is # 'physically connected'. 3 is 'not connected'. but a wlan interface will @@ -393,6 +398,8 @@ def get_interfaces_by_mac(): continue if is_bridge(name): continue + if is_vlan(name): + continue mac = get_interface_mac(name) # some devices may not have a mac (tun0) if not mac: diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 9cc5e4ab..89e75369 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -1463,13 +1463,16 @@ class TestNetRenderers(CiTestCase): class TestGetInterfacesByMac(CiTestCase): _data = {'devices': ['enp0s1', 'enp0s2', 'bond1', 'bridge1', - 'bridge1-nic', 'tun0'], + 'bridge1-nic', 'tun0', 'bond1.101'], 'bonds': ['bond1'], 'bridges': ['bridge1'], - 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1'], + 'vlans': ['bond1.101'], + 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', + 'bond1.101'], 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', 'enp0s2': 'aa:aa:aa:aa:aa:02', 'bond1': 'aa:aa:aa:aa:aa:01', + 'bond1.101': 'aa:aa:aa:aa:aa:01', 'bridge1': 'aa:aa:aa:aa:aa:03', 'bridge1-nic': 'aa:aa:aa:aa:aa:03', 'tun0': None}} @@ -1484,13 +1487,16 @@ class TestGetInterfacesByMac(CiTestCase): def _se_is_bridge(self, name): return name in self.data['bridges'] + def _se_is_vlan(self, name): + return name in self.data['vlans'] + def _se_interface_has_own_mac(self, name): return name in self.data['own_macs'] def _mock_setup(self): self.data = copy.deepcopy(self._data) mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', - 'interface_has_own_mac') + 'interface_has_own_mac', 'is_vlan') self.mocks = {} for n in mocks: m = mock.patch('cloudinit.net.' + n, @@ -1536,6 +1542,24 @@ class TestGetInterfacesByMac(CiTestCase): mock.call('b1')], any_order=True) + def test_excludes_vlans(self): + self._mock_setup() + # add a device 'b1', make all return they have their "own mac", + # set everything other than 'b1' to be a vlan. + # then expect b1 is the only thing left. + self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' + self.data['devices'].append('b1') + self.data['bonds'] = [] + self.data['bridges'] = [] + self.data['own_macs'] = self.data['devices'] + self.data['vlans'] = [f for f in self.data['devices'] if f != "b1"] + ret = net.get_interfaces_by_mac() + self.assertEqual({'aa:aa:aa:aa:aa:b1': 'b1'}, ret) + self.mocks['is_vlan'].assert_has_calls( + [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'), + mock.call('b1')], + any_order=True) + def _gzip_data(data): with io.BytesIO() as iobuf: -- cgit v1.2.3 From 33816e96d8981918f734dab3ee1a967bce85451a Mon Sep 17 00:00:00 2001 From: Syed Date: Wed, 5 Apr 2017 12:09:02 -0400 Subject: CloudStack: Add NetworkManager to list of supported DHCP lease dirs. To query the metadata, the Cloudstack source currently scans a predefined DHCP lease directories to find the IP of the DHCP server. 
This list does not include "/var/lib/NetworkManager/" which is the default directory in CentOS7. Add that directory to the list. --- cloudinit/sources/DataSourceCloudStack.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index ceef0282..0188d894 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -178,9 +178,10 @@ def get_default_gateway(): def get_dhclient_d(): # find lease files directory - supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp"] + supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp", + "/var/lib/NetworkManager"] for d in supported_dirs: - if os.path.exists(d): + if os.path.exists(d) and len(os.listdir(d)) > 0: LOG.debug("Using %s lease directory", d) return d return None -- cgit v1.2.3 From 5afe4cd0797a12d07ea19b9715b720d47bdea401 Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Thu, 6 Apr 2017 11:14:29 -0700 Subject: pylint: fix all logging warnings This will change all instances of LOG.warn to LOG.warning as warn is now a deprecated method. It will also make sure any logging uses lazy logging by passing string format arguments as function parameters. --- .pylintrc | 27 ++++++++++++++++-- cloudinit/cloud.py | 3 +- cloudinit/cmd/main.py | 17 +++++------ cloudinit/config/__init__.py | 2 +- cloudinit/config/cc_apt_configure.py | 11 ++++---- cloudinit/config/cc_disk_setup.py | 30 ++++++++++---------- cloudinit/config/cc_fan.py | 4 +-- cloudinit/config/cc_mounts.py | 12 ++++---- cloudinit/config/cc_resolv_conf.py | 2 +- cloudinit/config/cc_rsyslog.py | 7 +++-- cloudinit/config/cc_snappy.py | 6 ++-- cloudinit/distros/__init__.py | 31 ++++++++++---------- cloudinit/distros/arch.py | 6 ++-- cloudinit/distros/debian.py | 2 +- cloudinit/distros/freebsd.py | 8 +++--- cloudinit/distros/gentoo.py | 11 ++++---- cloudinit/distros/parsers/resolv_conf.py | 6 ++-- cloudinit/distros/ug_util.py | 16 +++++------ cloudinit/ec2_utils.py | 7 +++-- cloudinit/gpg.py | 2 +- cloudinit/handlers/__init__.py | 2 +- cloudinit/helpers.py | 14 ++++----- cloudinit/net/network_state.py | 8 +++--- cloudinit/reporting/handlers.py | 4 +-- cloudinit/sources/DataSourceAltCloud.py | 4 +-- cloudinit/sources/DataSourceAzure.py | 22 +++++++-------- cloudinit/sources/DataSourceCloudSigma.py | 2 +- cloudinit/sources/DataSourceConfigDrive.py | 4 +-- cloudinit/sources/DataSourceDigitalOcean.py | 2 +- cloudinit/sources/DataSourceEc2.py | 10 +++---- cloudinit/sources/DataSourceGCE.py | 5 ++-- cloudinit/sources/DataSourceMAAS.py | 10 +++---- cloudinit/sources/DataSourceNoCloud.py | 4 +-- cloudinit/sources/DataSourceOVF.py | 10 +++---- cloudinit/sources/DataSourceOpenNebula.py | 4 +-- cloudinit/sources/DataSourceOpenStack.py | 4 +-- cloudinit/sources/DataSourceSmartOS.py | 2 +- cloudinit/sources/__init__.py | 4 +-- cloudinit/sources/helpers/azure.py | 2 +- .../sources/helpers/vmware/imc/config_file.py | 8 +++--- cloudinit/stages.py | 33 +++++++++++----------- cloudinit/templater.py | 8 +++--- cloudinit/url_helper.py | 11 ++++---- cloudinit/user_data.py | 11 ++++---- cloudinit/util.py | 18 ++++++------ cloudinit/warnings.py | 4 +-- tests/cloud_tests/__main__.py | 2 +- tests/cloud_tests/args.py | 2 +- tests/cloud_tests/collect.py | 2 +- tests/cloud_tests/verify.py | 9 +++--- tools/mock-meta.py | 6 ++-- 51 files changed, 239 insertions(+), 202 deletions(-) (limited to 'cloudinit') diff --git a/.pylintrc b/.pylintrc index 
b8cda03c..0f5e41b1 100644 --- a/.pylintrc +++ b/.pylintrc @@ -6,8 +6,29 @@ jobs=4 [MESSAGES CONTROL] -# Errors only -disable=C, F, I, R, W +# Errors and warings with some filtered: +# W0105(pointless-string-statement) +# W0107(unnecessary-pass) +# W0201(attribute-defined-outside-init) +# W0212(protected-access) +# W0221(arguments-differ) +# W0222(signature-differs) +# W0223(abstract-method) +# W0231(super-init-not-called) +# W0311(bad-indentation) +# W0511(fixme) +# W0602(global-variable-not-assigned) +# W0603(global-statement) +# W0611(unused-import) +# W0612(unused-variable) +# W0613(unused-argument) +# W0621(redefined-outer-name) +# W0622(redefined-builtin) +# W0631(undefined-loop-variable) +# W0703(broad-except) +# W1401(anomalous-backslash-in-string) + +disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401 [REPORTS] @@ -25,7 +46,7 @@ reports=no # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis. It # supports qualified module names, as well as Unix pattern matching. -ignored-modules=six.moves,pkg_resources +ignored-modules=six.moves,pkg_resources,httplib,http.client # List of class names for which member attributes should not be checked (useful # for classes with dynamically set attributes). This supports the use of diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index b93a42ea..d8a9fc86 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -56,7 +56,8 @@ class Cloud(object): def get_template_filename(self, name): fn = self.paths.template_tpl % (name) if not os.path.isfile(fn): - LOG.warn("No template found at %s for template named %s", fn, name) + LOG.warning("No template found at %s for template named %s", + fn, name) return None return fn diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index fd221323..26cc2654 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -405,7 +405,8 @@ def main_init(name, args): errfmt_orig = errfmt (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name) if outfmt_orig != outfmt or errfmt_orig != errfmt: - LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt) + LOG.warning("Stdout, stderr changing to (%s, %s)", + outfmt, errfmt) (outfmt, errfmt) = util.fixup_output(mods.cfg, name) except Exception: util.logexc(LOG, "Failed to re-adjust output redirection!") @@ -427,15 +428,15 @@ def di_report_warn(datasource, cfg): dicfg = cfg.get('di_report', {}) if not isinstance(dicfg, dict): - LOG.warn("di_report config not a dictionary: %s", dicfg) + LOG.warning("di_report config not a dictionary: %s", dicfg) return dslist = dicfg.get('datasource_list') if dslist is None: - LOG.warn("no 'datasource_list' found in di_report.") + LOG.warning("no 'datasource_list' found in di_report.") return elif not isinstance(dslist, list): - LOG.warn("di_report/datasource_list not a list: %s", dslist) + LOG.warning("di_report/datasource_list not a list: %s", dslist) return # ds.__module__ is like cloudinit.sources.DataSourceName @@ -444,8 +445,8 @@ def di_report_warn(datasource, cfg): if modname.startswith(sources.DS_PREFIX): modname = modname[len(sources.DS_PREFIX):] else: - LOG.warn("Datasource '%s' came from unexpected module '%s'.", - datasource, modname) + LOG.warning("Datasource '%s' came from unexpected module '%s'.", + datasource, modname) if modname in dslist: LOG.debug("used datasource '%s' from '%s' was in di_report's list: 
%s", @@ -571,10 +572,10 @@ def main_single(name, args): mod_args, mod_freq) if failures: - LOG.warn("Ran %s but it failed!", mod_name) + LOG.warning("Ran %s but it failed!", mod_name) return 1 elif not which_ran: - LOG.warn("Did not run %s, does it exist?", mod_name) + LOG.warning("Did not run %s, does it exist?", mod_name) return 1 else: # Guess it worked diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py index 57e2a44d..0ef9a748 100644 --- a/cloudinit/config/__init__.py +++ b/cloudinit/config/__init__.py @@ -37,7 +37,7 @@ def fixup_module(mod, def_freq=PER_INSTANCE): else: freq = mod.frequency if freq and freq not in FREQUENCIES: - LOG.warn("Module %s has an unknown frequency %s", mod, freq) + LOG.warning("Module %s has an unknown frequency %s", mod, freq) if not hasattr(mod, 'distros'): setattr(mod, 'distros', []) if not hasattr(mod, 'osfamilies'): diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 06804e85..7e751776 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -347,8 +347,8 @@ def dpkg_reconfigure(packages, target=None): unhandled.append(pkg) if len(unhandled): - LOG.warn("The following packages were installed and preseeded, " - "but cannot be unconfigured: %s", unhandled) + LOG.warning("The following packages were installed and preseeded, " + "but cannot be unconfigured: %s", unhandled) if len(to_config): util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] + @@ -441,7 +441,7 @@ def rename_apt_lists(new_mirrors, target=None): os.rename(filename, newname) except OSError: # since this is a best effort task, warn with but don't fail - LOG.warn("Failed to rename apt list:", exc_info=True) + LOG.warning("Failed to rename apt list:", exc_info=True) def mirror_to_placeholder(tmpl, mirror, placeholder): @@ -449,7 +449,7 @@ def mirror_to_placeholder(tmpl, mirror, placeholder): replace the specified mirror in a template with a placeholder string Checks for existance of the expected mirror and warns if not found""" if mirror not in tmpl: - LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl) + LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl) return tmpl.replace(mirror, placeholder) @@ -525,7 +525,8 @@ def generate_sources_list(cfg, release, mirrors, cloud): if not template_fn: template_fn = cloud.get_template_filename('sources.list') if not template_fn: - LOG.warn("No template found, not rendering /etc/apt/sources.list") + LOG.warning("No template found, " + "not rendering /etc/apt/sources.list") return tmpl = util.load_file(template_fn) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index f39f0815..f49386e3 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -181,7 +181,7 @@ def update_fs_setup_devices(disk_setup, tformer): # update it with the response from 'tformer' for definition in disk_setup: if not isinstance(definition, dict): - LOG.warn("entry in disk_setup not a dict: %s", definition) + LOG.warning("entry in disk_setup not a dict: %s", definition) continue origname = definition.get('device') @@ -279,7 +279,7 @@ def is_device_valid(name, partition=False): try: d_type = device_type(name) except Exception: - LOG.warn("Query against device %s failed" % name) + LOG.warning("Query against device %s failed", name) return False if partition and d_type == 'part': @@ -372,7 +372,7 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None, if not 
raw_device_used: return (device, False) - LOG.warn("Failed to find device during available device search.") + LOG.warning("Failed to find device during available device search.") return (None, False) @@ -638,7 +638,7 @@ def purge_disk(device): if d['type'] not in ["disk", "crypt"]: wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']] try: - LOG.info("Purging filesystem on /dev/%s" % d['name']) + LOG.info("Purging filesystem on /dev/%s", d['name']) util.subp(wipefs_cmd) except Exception: raise Exception("Failed FS purge of /dev/%s" % d['name']) @@ -700,7 +700,7 @@ def exec_mkpart_gpt(device, layout): [SGDISK_CMD, '-t', '{}:{}'.format(index, partition_type), device]) except Exception: - LOG.warn("Failed to partition device %s" % device) + LOG.warning("Failed to partition device %s", device) raise read_parttbl(device) @@ -736,7 +736,7 @@ def mkpart(device, definition): # ensure that we get a real device rather than a symbolic link device = os.path.realpath(device) - LOG.debug("Checking values for %s definition" % device) + LOG.debug("Checking values for %s definition", device) overwrite = definition.get('overwrite', False) layout = definition.get('layout', False) table_type = definition.get('table_type', 'mbr') @@ -766,7 +766,7 @@ def mkpart(device, definition): LOG.debug("Checking if device is safe to partition") if not overwrite and (is_disk_used(device) or is_filesystem(device)): - LOG.debug("Skipping partitioning on configured device %s" % device) + LOG.debug("Skipping partitioning on configured device %s", device) return LOG.debug("Checking for device size") @@ -774,7 +774,7 @@ def mkpart(device, definition): LOG.debug("Calculating partition layout") part_definition = get_partition_layout(table_type, device_size, layout) - LOG.debug(" Layout is: %s" % part_definition) + LOG.debug(" Layout is: %s", part_definition) LOG.debug("Creating partition table on %s", device) exec_mkpart(table_type, device, part_definition) @@ -799,7 +799,7 @@ def lookup_force_flag(fs): if fs.lower() in flags: return flags[fs] - LOG.warn("Force flag for %s is unknown." % fs) + LOG.warning("Force flag for %s is unknown.", fs) return '' @@ -858,7 +858,7 @@ def mkfs(fs_cfg): LOG.debug("Device %s has required file system", device) return else: - LOG.warn("Destroying filesystem on %s", device) + LOG.warning("Destroying filesystem on %s", device) else: LOG.debug("Device %s is cleared for formating", device) @@ -883,14 +883,14 @@ def mkfs(fs_cfg): return if not reuse and fs_replace and device: - LOG.debug("Replacing file system on %s as instructed." % device) + LOG.debug("Replacing file system on %s as instructed.", device) if not device: LOG.debug("No device aviable that matches request. " "Skipping fs creation for %s", fs_cfg) return elif not partition or str(partition).lower() == 'none': - LOG.debug("Using the raw device to place filesystem %s on" % label) + LOG.debug("Using the raw device to place filesystem %s on", label) else: LOG.debug("Error in device identification handling.") @@ -901,7 +901,7 @@ def mkfs(fs_cfg): # Make sure the device is defined if not device: - LOG.warn("Device is not known: %s", device) + LOG.warning("Device is not known: %s", device) return # Check that we can create the FS @@ -923,8 +923,8 @@ def mkfs(fs_cfg): mkfs_cmd = util.which("mk%s" % fs_type) if not mkfs_cmd: - LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type, - fs_type) + LOG.warning("Cannot create fstype '%s'. 
No mkfs.%s command", + fs_type, fs_type) return fs_cmd = [mkfs_cmd, device] diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py index f0cda3d5..0a135bbe 100644 --- a/cloudinit/config/cc_fan.py +++ b/cloudinit/config/cc_fan.py @@ -64,7 +64,7 @@ def stop_update_start(service, config_file, content, systemd=False): try: return util.subp(cmd, capture=True) except util.ProcessExecutionError as e: - LOG.warn("failed: %s (%s): %s", service, cmd, e) + LOG.warning("failed: %s (%s): %s", service, cmd, e) return False stop_failed = not run(cmds['stop'], msg='stop %s' % service) @@ -74,7 +74,7 @@ def stop_update_start(service, config_file, content, systemd=False): ret = run(cmds['start'], msg='start %s' % service) if ret and stop_failed: - LOG.warn("success: %s started", service) + LOG.warning("success: %s started", service) if 'enable' in cmds: ret = run(cmds['enable'], msg='enable %s' % service) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 5b630f8b..f14a4fc5 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -216,8 +216,9 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None): else: pinfo[k] = v - LOG.debug("suggest %(size)s swap for %(mem)s memory with '%(avail)s'" - " disk given max=%(max_in)s [max=%(max)s]'" % pinfo) + LOG.debug("suggest %s swap for %s memory with '%s'" + " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'], + pinfo['avail'], pinfo['max_in'], pinfo['max']) return size @@ -266,7 +267,7 @@ def handle_swapcfg(swapcfg): return None or (filename, size) """ if not isinstance(swapcfg, dict): - LOG.warn("input for swap config was not a dict.") + LOG.warning("input for swap config was not a dict.") return None fname = swapcfg.get('filename', '/swap.img') @@ -289,7 +290,8 @@ def handle_swapcfg(swapcfg): return fname LOG.debug("swap file %s existed, but not in /proc/swaps", fname) except Exception: - LOG.warn("swap file %s existed. Error reading /proc/swaps", fname) + LOG.warning("swap file %s existed. 
Error reading /proc/swaps", + fname) return fname try: @@ -300,7 +302,7 @@ def handle_swapcfg(swapcfg): return setup_swapfile(fname=fname, size=size, maxsize=maxsize) except Exception as e: - LOG.warn("failed to setup swap: %s", e) + LOG.warning("failed to setup swap: %s", e) return None diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 9c5cd1fe..2548d1f1 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -77,7 +77,7 @@ def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"): params['options'] = {} params['flags'] = flags - LOG.debug("Writing resolv.conf from template %s" % template_fn) + LOG.debug("Writing resolv.conf from template %s", template_fn) templater.render_to_file(template_fn, target_fname, params) diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 50316214..50ff9e35 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -252,7 +252,8 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir): for cur_pos, ent in enumerate(configs): if isinstance(ent, dict): if "content" not in ent: - LOG.warn("No 'content' entry in config entry %s", cur_pos + 1) + LOG.warning("No 'content' entry in config entry %s", + cur_pos + 1) continue content = ent['content'] filename = ent.get("filename", def_fname) @@ -262,7 +263,7 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir): filename = filename.strip() if not filename: - LOG.warn("Entry %s has an empty filename", cur_pos + 1) + LOG.warning("Entry %s has an empty filename", cur_pos + 1) continue filename = os.path.join(cfg_dir, filename) @@ -389,7 +390,7 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None): try: lines.append(str(parse_remotes_line(line, name=name))) except ValueError as e: - LOG.warn("failed loading remote %s: %s [%s]", name, line, e) + LOG.warning("failed loading remote %s: %s [%s]", name, line, e) if footer is not None: lines.append(footer) return '\n'.join(lines) + "\n" diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index 6ea81b84..a9682f19 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -283,8 +283,8 @@ def handle(name, cfg, cloud, log, args): render_snap_op(**pkg_op) except Exception as e: fails.append((pkg_op, e,)) - LOG.warn("'%s' failed for '%s': %s", - pkg_op['op'], pkg_op['name'], e) + LOG.warning("'%s' failed for '%s': %s", + pkg_op['op'], pkg_op['name'], e) # Default to disabling SSH ssh_enabled = mycfg.get('ssh_enabled', "auto") @@ -303,7 +303,7 @@ def handle(name, cfg, cloud, log, args): LOG.debug("Enabling SSH, password authentication requested") ssh_enabled = True elif ssh_enabled not in (True, False): - LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled) + LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled) disable_enable_ssh(ssh_enabled) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 803ac74e..28650b88 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -143,9 +143,9 @@ class Distro(object): def _apply_network_from_network_config(self, netconfig, bring_up=True): distro = self.__class__ - LOG.warn("apply_network_config is not currently implemented " - "for distribution '%s'. Attempting to use apply_network", - distro) + LOG.warning("apply_network_config is not currently implemented " + "for distribution '%s'. 
Attempting to use apply_network", + distro) header = '\n'.join([ "# Converted from network_config for distro %s" % distro, "# Implmentation of _write_network_config is needed." @@ -335,7 +335,8 @@ class Distro(object): try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", cmd, err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) return True except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) @@ -358,7 +359,7 @@ class Distro(object): Add a user to the system using standard GNU tools """ if util.is_user(name): - LOG.info("User %s already exists, skipping." % name) + LOG.info("User %s already exists, skipping.", name) return if 'create_groups' in kwargs: @@ -520,9 +521,9 @@ class Distro(object): keys = list(keys.values()) if keys is not None: if not isinstance(keys, (tuple, list, set)): - LOG.warn("Invalid type '%s' detected for" - " 'ssh_authorized_keys', expected list," - " string, dict, or set.", type(keys)) + LOG.warning("Invalid type '%s' detected for" + " 'ssh_authorized_keys', expected list," + " string, dict, or set.", type(keys)) else: keys = set(keys) or [] ssh_util.setup_user_keys(keys, name, options=None) @@ -595,7 +596,7 @@ class Distro(object): "#includedir %s" % (path), ''] sudoers_contents = "\n".join(lines) util.append_file(sudo_base, sudoers_contents) - LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base)) + LOG.debug("Added '#includedir %s' to %s", path, sudo_base) except IOError as e: util.logexc(LOG, "Failed to write %s", sudo_base) raise e @@ -647,11 +648,11 @@ class Distro(object): # Check if group exists, and then add it doesn't if util.is_group(name): - LOG.warn("Skipping creation of existing group '%s'" % name) + LOG.warning("Skipping creation of existing group '%s'", name) else: try: util.subp(group_add_cmd) - LOG.info("Created new group %s" % name) + LOG.info("Created new group %s", name) except Exception: util.logexc(LOG, "Failed to create group %s", name) @@ -659,12 +660,12 @@ class Distro(object): if len(members) > 0: for member in members: if not util.is_user(member): - LOG.warn("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) + LOG.warning("Unable to add group member '%s' to group '%s'" + "; user does not exist.", member, name) continue util.subp(['usermod', '-a', '-G', name, member]) - LOG.info("Added user '%s' to group '%s'" % (member, name)) + LOG.info("Added user '%s' to group '%s'", member, name) def _get_package_mirror_info(mirror_info, data_source=None, @@ -708,7 +709,7 @@ def _get_package_mirror_info(mirror_info, data_source=None, if found: results[name] = found - LOG.debug("filtered distro mirror info: %s" % results) + LOG.debug("filtered distro mirror info: %s", results) return results diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 64b8c1fb..75d46201 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -83,7 +83,8 @@ class Distro(distros.Distro): try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", cmd, err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) @@ -94,7 +95,8 @@ class Distro(distros.Distro): try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", cmd, err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) 
return True except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 16f8d955..d06d46a6 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -223,6 +223,6 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"): except Exception: msg = bmsg + " %s exists, but could not be read." % path - LOG.warn(msg) + LOG.warning(msg) # vi: ts=4 expandtab diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index a70ee45b..183e4452 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -148,7 +148,7 @@ class Distro(distros.Distro): def create_group(self, name, members): group_add_cmd = ['pw', '-n', name] if util.is_group(name): - LOG.warn("Skipping creation of existing group '%s'", name) + LOG.warning("Skipping creation of existing group '%s'", name) else: try: util.subp(group_add_cmd) @@ -160,8 +160,8 @@ class Distro(distros.Distro): if len(members) > 0: for member in members: if not util.is_user(member): - LOG.warn("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) + LOG.warning("Unable to add group member '%s' to group '%s'" + "; user does not exist.", member, name) continue try: util.subp(['pw', 'usermod', '-n', name, '-G', member]) @@ -369,7 +369,7 @@ class Distro(distros.Distro): # OS. This is just fine. (_out, err) = util.subp(cmd, rcs=[0, 1]) if len(err): - LOG.warn("Error running %s: %s", cmd, err) + LOG.warning("Error running %s: %s", cmd, err) def install_packages(self, pkglist): self.update_package_sources() diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py index 83fb56ff..0ad2f032 100644 --- a/cloudinit/distros/gentoo.py +++ b/cloudinit/distros/gentoo.py @@ -96,8 +96,8 @@ class Distro(distros.Distro): try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", - cmd, err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) @@ -121,7 +121,8 @@ class Distro(distros.Distro): try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", cmd, err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) return True except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) @@ -138,8 +139,8 @@ class Distro(distros.Distro): try: (_out, err) = util.subp(cmd) if len(err): - LOG.warn("Running %s resulted in stderr output: %s", cmd, - err) + LOG.warning("Running %s resulted in stderr output: %s", + cmd, err) except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) return False diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index d1f8a042..a62055ae 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -81,9 +81,9 @@ class ResolvConf(object): if len(new_ns) == len(current_ns): return current_ns if len(current_ns) >= 3: - LOG.warn("ignoring nameserver %r: adding would " - "exceed the maximum of " - "'3' name servers (see resolv.conf(5))" % (ns)) + LOG.warning("ignoring nameserver %r: adding would " + "exceed the maximum of " + "'3' name servers (see resolv.conf(5))", ns) return current_ns[:3] self._remove_option('nameserver') for n in new_ns: diff --git 
a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py index 53a0eafb..9378dd78 100755 --- a/cloudinit/distros/ug_util.py +++ b/cloudinit/distros/ug_util.py @@ -214,8 +214,8 @@ def normalize_users_groups(cfg, distro): 'name': old_user, } if not isinstance(old_user, dict): - LOG.warn(("Format for 'user' key must be a string or " - "dictionary and not %s"), type_utils.obj_name(old_user)) + LOG.warning(("Format for 'user' key must be a string or dictionary" + " and not %s"), type_utils.obj_name(old_user)) old_user = {} # If no old user format, then assume the distro @@ -227,9 +227,9 @@ def normalize_users_groups(cfg, distro): try: distro_user_config = distro.get_default_user() except NotImplementedError: - LOG.warn(("Distro has not implemented default user " - "access. No distribution provided default user" - " will be normalized.")) + LOG.warning(("Distro has not implemented default user " + "access. No distribution provided default user" + " will be normalized.")) # Merge the old user (which may just be an empty dict when not # present with the distro provided default user configuration so @@ -239,9 +239,9 @@ def normalize_users_groups(cfg, distro): base_users = cfg.get('users', []) if not isinstance(base_users, (list, dict) + six.string_types): - LOG.warn(("Format for 'users' key must be a comma separated string" - " or a dictionary or a list and not %s"), - type_utils.obj_name(base_users)) + LOG.warning(("Format for 'users' key must be a comma separated string" + " or a dictionary or a list and not %s"), + type_utils.obj_name(base_users)) base_users = [] if old_user: diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 13691549..723d6bd6 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -38,8 +38,8 @@ class MetadataLeafDecoder(object): # Assume it's json, unless it fails parsing... 
return json.loads(blob) except (ValueError, TypeError) as e: - LOG.warn("Field %s looked like a json object, but it was" - " not: %s", field, e) + LOG.warning("Field %s looked like a json object, but it" + " was not: %s", field, e) if blob.find("\n") != -1: return blob.splitlines() return blob @@ -125,7 +125,8 @@ class MetadataMaterializer(object): joined.update(child_contents) for field in leaf_contents.keys(): if field in joined: - LOG.warn("Duplicate key found in results from %s", base_url) + LOG.warning("Duplicate key found in results from %s", + base_url) else: joined[field] = leaf_contents[field] return joined diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py index 70c620de..d58d73e0 100644 --- a/cloudinit/gpg.py +++ b/cloudinit/gpg.py @@ -43,7 +43,7 @@ def delete_key(key): util.subp(["gpg", "--batch", "--yes", "--delete-keys", key], capture=True) except util.ProcessExecutionError as error: - LOG.warn('Failed delete key "%s": %s', key, error) + LOG.warning('Failed delete key "%s": %s', key, error) def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'): diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 1362db6e..c3576c04 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -246,7 +246,7 @@ def fixup_handler(mod, def_freq=PER_INSTANCE): else: freq = mod.frequency if freq and freq not in FREQUENCIES: - LOG.warn("Handler %s has an unknown frequency %s", mod, freq) + LOG.warning("Handler %s has an unknown frequency %s", mod, freq) return mod diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 7435d58d..f01021aa 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -126,11 +126,11 @@ class FileSemaphores(object): # this case could happen if the migrator module hadn't run yet # but the item had run before we did canon_sem_name. if cname != name and os.path.exists(self._get_path(name, freq)): - LOG.warn("%s has run without canonicalized name [%s].\n" - "likely the migrator has not yet run. " - "It will run next boot.\n" - "run manually with: cloud-init single --name=migrator" - % (name, cname)) + LOG.warning("%s has run without canonicalized name [%s].\n" + "likely the migrator has not yet run. 
" + "It will run next boot.\n" + "run manually with: cloud-init single --name=migrator", + name, cname) return True return False @@ -375,8 +375,8 @@ class Paths(object): def get_ipath(self, name=None): ipath = self._get_ipath(name) if not ipath: - LOG.warn(("No per instance data available, " - "is there an datasource/iid set?")) + LOG.warning(("No per instance data available, " + "is there an datasource/iid set?")) return None else: return ipath diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 692b6007..db3c3579 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -242,8 +242,8 @@ class NetworkStateInterpreter(object): if not skip_broken: raise else: - LOG.warn("Skipping invalid command: %s", command, - exc_info=True) + LOG.warning("Skipping invalid command: %s", command, + exc_info=True) LOG.debug(self.dump_network_state()) def parse_config_v2(self, skip_broken=True): @@ -262,8 +262,8 @@ class NetworkStateInterpreter(object): if not skip_broken: raise else: - LOG.warn("Skipping invalid command: %s", command, - exc_info=True) + LOG.warning("Skipping invalid command: %s", command, + exc_info=True) LOG.debug(self.dump_network_state()) @ensure_command_keys(['name']) diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index b90bc191..4066076c 100644 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -37,7 +37,7 @@ class LogHandler(ReportingHandler): try: level = getattr(logging, level.upper()) except Exception: - LOG.warn("invalid level '%s', using WARN", input_level) + LOG.warning("invalid level '%s', using WARN", input_level) level = logging.WARN self.level = level @@ -82,7 +82,7 @@ class WebHookHandler(ReportingHandler): timeout=self.timeout, retries=self.retries, ssl_details=self.ssl_details) except Exception: - LOG.warn("failed posting event: %s" % event.as_string()) + LOG.warning("failed posting event: %s", event.as_string()) available_handlers = DictRegistry() diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 8528fa10..ed1d691a 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -181,7 +181,7 @@ class DataSourceAltCloud(sources.DataSource): try: cmd = CMD_PROBE_FLOPPY (cmd_out, _err) = util.subp(cmd) - LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) + LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) except ProcessExecutionError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) return False @@ -196,7 +196,7 @@ class DataSourceAltCloud(sources.DataSource): cmd = CMD_UDEVADM_SETTLE cmd.append('--exit-if-exists=' + floppy_dev) (cmd_out, _err) = util.subp(cmd) - LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) + LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out) except ProcessExecutionError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) return False diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 48a3e1df..04358b73 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -116,7 +116,7 @@ class DataSourceAzureNet(sources.DataSource): # the metadata and "bounce" the network to force DDNS to update via # dhclient azure_hostname = self.metadata.get('local-hostname') - LOG.debug("Hostname in metadata is {}".format(azure_hostname)) + LOG.debug("Hostname in metadata is %s", azure_hostname) 
         hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']

         with temporary_hostname(azure_hostname, self.ds_cfg,
@@ -132,7 +132,7 @@ class DataSourceAzureNet(sources.DataSource):
                     cfg=cfg,
                     prev_hostname=previous_hostname)
             except Exception as e:
-                LOG.warn("Failed publishing hostname: %s", e)
+                LOG.warning("Failed publishing hostname: %s", e)
                 util.logexc(LOG, "handling set_hostname failed")

     def get_metadata_from_agent(self):
@@ -168,7 +168,7 @@ class DataSourceAzureNet(sources.DataSource):
                             func=wait_for_files,
                             args=(fp_files,))
         if len(missing):
-            LOG.warn("Did not find files, but going on: %s", missing)
+            LOG.warning("Did not find files, but going on: %s", missing)

         metadata = {}
         metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
@@ -199,7 +199,7 @@ class DataSourceAzureNet(sources.DataSource):
             except BrokenAzureDataSource as exc:
                 raise exc
             except util.MountFailedError:
-                LOG.warn("%s was not mountable", cdev)
+                LOG.warning("%s was not mountable", cdev)
                 continue

             (md, self.userdata_raw, cfg, files) = ret
@@ -331,8 +331,8 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
                             log_pre="Azure ephemeral disk: ")

     if missing:
-        LOG.warn("ephemeral device '%s' did not appear after %d seconds.",
-                 devpath, maxwait)
+        LOG.warning("ephemeral device '%s' did not appear after %d seconds.",
+                    devpath, maxwait)
         return

     result = False
@@ -342,7 +342,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
     else:
         result, msg = can_dev_be_reformatted(devpath)

-    LOG.debug("reformattable=%s: %s" % (result, msg))
+    LOG.debug("reformattable=%s: %s", result, msg)
     if not result:
         return
@@ -355,7 +355,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
             LOG.debug(bmsg + " removed.")
         except Exception as e:
             # python3 throws FileNotFoundError, python2 throws OSError
-            LOG.warn(bmsg + ": remove failed! (%s)" % e)
+            LOG.warning(bmsg + ": remove failed! (%s)", e)
     else:
         LOG.debug(bmsg + " did not exist.")
     return
@@ -405,7 +405,7 @@ def pubkeys_from_crt_files(flist):
             errors.append(fname)

     if errors:
-        LOG.warn("failed to convert the crt files to pubkey: %s", errors)
+        LOG.warning("failed to convert the crt files to pubkey: %s", errors)

     return pubkeys
@@ -427,8 +427,8 @@ def wait_for_files(flist, maxwait=60, naplen=.5, log_pre=""):
         time.sleep(naplen)
         waited += naplen

-    LOG.warn("%sStill missing files after %s seconds: %s",
-             log_pre, maxwait, need)
+    LOG.warning("%sStill missing files after %s seconds: %s",
+                log_pre, maxwait, need)
     return need
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index ffc23e3d..19df16b1 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -43,7 +43,7 @@ class DataSourceCloudSigma(sources.DataSource):
                 LOG.debug("detected hypervisor as %s", sys_product_name)
                 return 'cloudsigma' in sys_product_name.lower()

-        LOG.warn("failed to query dmi data for system product name")
+        LOG.warning("failed to query dmi data for system product name")
         return False

     def get_data(self):
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 46dd89e0..ef374f3f 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -127,7 +127,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
             try:
                 self.vendordata_raw = sources.convert_vendordata(vd)
             except ValueError as e:
-                LOG.warn("Invalid content in vendor-data: %s", e)
+                LOG.warning("Invalid content in vendor-data: %s", e)
                 self.vendordata_raw = None

         # network_config is an /etc/network/interfaces formated file and is
@@ -190,7 +190,7 @@ def on_first_boot(data, distro=None, network=True):
     if network:
         net_conf = data.get("network_config", '')
         if net_conf and distro:
-            LOG.warn("Updating network interfaces from config drive")
+            LOG.warning("Updating network interfaces from config drive")
             distro.apply_network(net_conf)

     write_injected_files(data.get('files'))
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index d052c4c3..5e7e66be 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -51,7 +51,7 @@ class DataSourceDigitalOcean(sources.DataSource):
         if not is_do:
             return False

-        LOG.info("Running on digital ocean. droplet_id=%s" % droplet_id)
+        LOG.info("Running on digital ocean. droplet_id=%s", droplet_id)

         ipv4LL_nic = None
         if self.use_ip4LL:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 6f01a139..2f9c7edf 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -125,7 +125,7 @@ class DataSourceEc2(sources.DataSource):
         if len(filtered):
             mdurls = filtered
         else:
-            LOG.warn("Empty metadata url list! using default list")
+            LOG.warning("Empty metadata url list! using default list")
             mdurls = self.metadata_urls

         urls = []
@@ -232,7 +232,7 @@ def read_strict_mode(cfgval, default):
     try:
         return parse_strict_mode(cfgval)
     except ValueError as e:
-        LOG.warn(e)
+        LOG.warning(e)
         return default
@@ -270,7 +270,7 @@ def warn_if_necessary(cfgval, cfg):
     try:
         mode, sleep = parse_strict_mode(cfgval)
     except ValueError as e:
-        LOG.warn(e)
+        LOG.warning(e)
         return

     if mode == "false":
@@ -304,8 +304,8 @@ def identify_platform():
             if result:
                 return result
         except Exception as e:
-            LOG.warn("calling %s with %s raised exception: %s",
-                     checker, data, e)
+            LOG.warning("calling %s with %s raised exception: %s",
+                        checker, data, e)


 def _collect_platform_data():
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 637c9505..e9afda9c 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -98,7 +98,7 @@ class DataSourceGCE(sources.DataSource):
                 if not running_on_gce:
                     LOG.debug(msg, mkey)
                 else:
-                    LOG.warn(msg, mkey)
+                    LOG.warning(msg, mkey)
                 return False
             self.metadata[mkey] = value
@@ -116,7 +116,8 @@ class DataSourceGCE(sources.DataSource):
                 self.metadata['user-data'] = b64decode(
                     self.metadata['user-data'])
             else:
-                LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
+                LOG.warning('unknown user-data-encoding: %s, ignoring',
+                            encoding)

         return running_on_gce
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 41179b02..77df5a51 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -71,7 +71,7 @@ class DataSourceMAAS(sources.DataSource):
             except MAASSeedDirNone:
                 pass
             except MAASSeedDirMalformed as exc:
-                LOG.warn("%s was malformed: %s" % (self.seed_dir, exc))
+                LOG.warning("%s was malformed: %s", self.seed_dir, exc)
                 raise

         # If there is no metadata_url, then we're not configured
@@ -107,7 +107,7 @@ class DataSourceMAAS(sources.DataSource):
             try:
                 self.vendordata_raw = sources.convert_vendordata(vd)
             except ValueError as e:
-                LOG.warn("Invalid content in vendor-data: %s", e)
+                LOG.warning("Invalid content in vendor-data: %s", e)
                 self.vendordata_raw = None

     def wait_for_metadata_service(self, url):
@@ -126,7 +126,7 @@ class DataSourceMAAS(sources.DataSource):
             if timeout in mcfg:
                 timeout = int(mcfg.get("timeout", timeout))
         except Exception:
-            LOG.warn("Failed to get timeout, using %s" % timeout)
+            LOG.warning("Failed to get timeout, using %s", timeout)

         starttime = time.time()
         if url.endswith("/"):
@@ -190,8 +190,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
                 else:
                     md[path] = util.decode_binary(resp.contents)
             else:
-                LOG.warn(("Fetching from %s resulted in"
-                          " an invalid http code %s"), url, resp.code)
+                LOG.warning(("Fetching from %s resulted in"
+                             " an invalid http code %s"), url, resp.code)
         except url_helper.UrlError as e:
             if e.code == 404 and not optional:
                 raise MAASSeedDirMalformed(
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 5924b828..c68f6b8c 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -104,8 +104,8 @@ class DataSourceNoCloud(sources.DataSource):
                                                      pp2d_kwargs)
                 except ValueError as e:
                     if dev in label_list:
-                        LOG.warn("device %s with label=%s not a"
-                                 "valid seed.", dev, label)
+                        LOG.warning("device %s with label=%s not a"
+                                    "valid seed.", dev, label)
                     continue

                 mydata = _merge_new_seed(mydata, seeded)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index d70784ac..f20c9a65 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -225,12 +225,12 @@ def get_max_wait_from_cfg(cfg):
     try:
         max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
     except ValueError:
-        LOG.warn("Failed to get '%s', using %s",
-                 max_wait_cfg_option, default_max_wait)
+        LOG.warning("Failed to get '%s', using %s",
+                    max_wait_cfg_option, default_max_wait)

     if max_wait <= 0:
-        LOG.warn("Invalid value '%s' for '%s', using '%s' instead",
-                 max_wait, max_wait_cfg_option, default_max_wait)
+        LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
+                    max_wait, max_wait_cfg_option, default_max_wait)
         max_wait = default_max_wait

     return max_wait
@@ -355,7 +355,7 @@ def transport_iso9660(require_iso=True):
         try:
             (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
         except util.MountFailedError:
-            LOG.debug("%s not mountable as iso9660" % fullp)
+            LOG.debug("%s not mountable as iso9660", fullp)
             continue

         if contents is not False:
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index cd75e6ea..5fdac192 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -64,7 +64,7 @@ class DataSourceOpenNebula(sources.DataSource):
             except BrokenContextDiskDir as exc:
                 raise exc
             except util.MountFailedError:
-                LOG.warn("%s was not mountable" % cdev)
+                LOG.warning("%s was not mountable", cdev)

         if results:
             seed = cdev
@@ -381,7 +381,7 @@ def read_context_disk_dir(source_dir, asuser=None):
         try:
             results['userdata'] = util.b64d(results['userdata'])
         except TypeError:
-            LOG.warn("Failed base64 decoding of userdata")
+            LOG.warning("Failed base64 decoding of userdata")

     # generate static /etc/network/interfaces
     # only if there are any required context variables
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index e1ea21f8..f0a6bfce 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -73,7 +73,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         if len(filtered):
             urls = filtered
         else:
-            LOG.warn("Empty metadata url list! using default list")
+            LOG.warning("Empty metadata url list! using default list")
             urls = [DEF_MD_URL]

         md_urls = []
@@ -137,7 +137,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         try:
             self.vendordata_raw = sources.convert_vendordata(vd)
         except ValueError as e:
-            LOG.warn("Invalid content in vendor-data: %s", e)
+            LOG.warning("Invalid content in vendor-data: %s", e)
             self.vendordata_raw = None

         return True
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 5e668947..6c6902fd 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -555,7 +555,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
                 val = base64.b64decode(val.encode()).decode()
             # Bogus input produces different errors in Python 2 and 3
             except (TypeError, binascii.Error):
-                LOG.warn("Failed base64 decoding key '%s': %s", key, val)
+                LOG.warning("Failed base64 decoding key '%s': %s", key, val)

         if strip:
             val = val.strip()
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 5c99437e..c3ce36d6 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -237,8 +237,8 @@ class DataSource(object):
             if candidate in valid:
                 return candidate
             else:
-                LOG.warn("invalid dsmode '%s', using default=%s",
-                         candidate, default)
+                LOG.warning("invalid dsmode '%s', using default=%s",
+                            candidate, default)
                 return default

         return default
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index f32dac9a..6e01aa47 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -289,7 +289,7 @@ class WALinuxAgentShim(object):
             LOG.debug("Unable to find endpoint in dhclient logs. "
                       " Falling back to check lease files")
             if fallback_lease_file is None:
-                LOG.warn("No fallback lease file was specified.")
+                LOG.warning("No fallback lease file was specified.")
                 value = None
             else:
                 LOG.debug("Looking for endpoint in lease file %s",
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 14293f3c..602af078 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -43,9 +43,9 @@ class ConfigFile(ConfigSource, dict):
         # "sensitive" settings shall not be logged
         if canLog:
-            logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
+            logger.debug("ADDED KEY-VAL :: '%s' = '%s'", key, val)
         else:
-            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
+            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'", key)

         self[key] = val
@@ -60,7 +60,7 @@ class ConfigFile(ConfigSource, dict):
         Keyword arguments:
         filename - The full path to the config file.
         """
-        logger.info('Parsing the config file %s.' % filename)
+        logger.info('Parsing the config file %s.', filename)

         config = configparser.ConfigParser()
         config.optionxform = str
@@ -69,7 +69,7 @@ class ConfigFile(ConfigSource, dict):
         self.clear()

         for category in config.sections():
-            logger.debug("FOUND CATEGORY = '%s'" % category)
+            logger.debug("FOUND CATEGORY = '%s'", category)

             for (key, value) in config.items(category):
                 self._insertKey(category + '|' + key, value)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 12165433..f7191b09 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -163,8 +163,8 @@ class Init(object):
             except OSError as e:
                 error = e

-        LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
-                 log_file, ','.join(perms), error)
+        LOG.warning("Failed changing perms on '%s'. tried: %s. %s",
+                    log_file, ','.join(perms), error)

     def read_cfg(self, extra_fns=None):
         # None check so that we don't keep on re-loading if empty
@@ -447,9 +447,9 @@ class Init(object):
             mod_locs, looked_locs = importer.find_module(
                 mod_name, [''], ['list_types', 'handle_part'])
             if not mod_locs:
-                LOG.warn("Could not find a valid user-data handler"
-                         " named %s in file %s (searched %s)",
-                         mod_name, fname, looked_locs)
+                LOG.warning("Could not find a valid user-data handler"
+                            " named %s in file %s (searched %s)",
+                            mod_name, fname, looked_locs)
                 continue
             mod = importer.import_module(mod_locs[0])
             mod = handlers.fixup_handler(mod)
@@ -568,7 +568,8 @@ class Init(object):
         if not isinstance(vdcfg, dict):
             vdcfg = {'enabled': False}
-            LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
+            LOG.warning("invalid 'vendor_data' setting. resetting to: %s",
+                        vdcfg)

         enabled = vdcfg.get('enabled')
         no_handlers = vdcfg.get('disabled_handlers', None)
@@ -632,10 +633,10 @@ class Init(object):
             return
         try:
-            LOG.debug("applying net config names for %s" % netcfg)
+            LOG.debug("applying net config names for %s", netcfg)
             self.distro.apply_network_config_names(netcfg)
         except Exception as e:
-            LOG.warn("Failed to rename devices: %s", e)
+            LOG.warning("Failed to rename devices: %s", e)

         if (self.datasource is not NULL_DATA_SOURCE and
                 not self.is_new_instance()):
@@ -651,9 +652,9 @@ class Init(object):
                           "likely broken: %s", e)
             return
         except NotImplementedError:
-            LOG.warn("distro '%s' does not implement apply_network_config. "
-                     "networking may not be configured properly.",
-                     self.distro)
+            LOG.warning("distro '%s' does not implement apply_network_config. "
+                        "networking may not be configured properly.",
+                        self.distro)
             return
@@ -737,15 +738,15 @@ class Modules(object):
             if not mod_name:
                 continue
             if freq and freq not in FREQUENCIES:
-                LOG.warn(("Config specified module %s"
-                          " has an unknown frequency %s"), raw_name, freq)
+                LOG.warning(("Config specified module %s"
+                             " has an unknown frequency %s"), raw_name, freq)
                 # Reset it so when ran it will get set to a known value
                 freq = None
             mod_locs, looked_locs = importer.find_module(
                 mod_name, ['', type_utils.obj_name(config)], ['handle'])
             if not mod_locs:
-                LOG.warn("Could not find module named %s (searched %s)",
-                         mod_name, looked_locs)
+                LOG.warning("Could not find module named %s (searched %s)",
+                            mod_name, looked_locs)
                 continue
             mod = config.fixup_module(importer.import_module(mod_locs[0]))
             mostly_mods.append([mod, raw_name, freq, run_args])
@@ -877,7 +878,7 @@ def _pkl_load(fname):
         pickle_contents = util.load_file(fname, decode=False)
     except Exception as e:
         if os.path.isfile(fname):
-            LOG.warn("failed loading pickle in %s: %s" % (fname, e))
+            LOG.warning("failed loading pickle in %s: %s", fname, e)
         pass

     # This is allowed so just return nothing successfully loaded...
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 648cd218..b3ea64e4 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -103,14 +103,14 @@ def detect_template(text):
         raise ValueError("Unknown template rendering type '%s' requested"
                          % template_type)
     if template_type == 'jinja' and not JINJA_AVAILABLE:
-        LOG.warn("Jinja not available as the selected renderer for"
-                 " desired template, reverting to the basic renderer.")
+        LOG.warning("Jinja not available as the selected renderer for"
+                    " desired template, reverting to the basic renderer.")
         return ('basic', basic_render, rest)
     elif template_type == 'jinja' and JINJA_AVAILABLE:
         return ('jinja', jinja_render, rest)
     if template_type == 'cheetah' and not CHEETAH_AVAILABLE:
-        LOG.warn("Cheetah not available as the selected renderer for"
-                 " desired template, reverting to the basic renderer.")
+        LOG.warning("Cheetah not available as the selected renderer for"
+                    " desired template, reverting to the basic renderer.")
         return ('basic', basic_render, rest)
     elif template_type == 'cheetah' and CHEETAH_AVAILABLE:
         return ('cheetah', cheetah_render, rest)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 2f6a158e..d2b92e6a 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -155,8 +155,8 @@ def _get_ssl_args(url, ssl_details):
     scheme = urlparse(url).scheme
     if scheme == 'https' and ssl_details:
         if not SSL_ENABLED:
-            LOG.warn("SSL is not supported in requests v%s, "
-                     "cert. verification can not occur!", _REQ_VER)
+            LOG.warning("SSL is not supported in requests v%s, "
+                        "cert. verification can not occur!", _REQ_VER)
         else:
             if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
                 ssl_args['verify'] = ssl_details['ca_certs']
@@ -415,14 +415,15 @@ class OauthUrlHelper(object):
             return

         if 'date' not in exception.headers:
-            LOG.warn("Missing header 'date' in %s response", exception.code)
+            LOG.warning("Missing header 'date' in %s response",
+                        exception.code)
             return

         date = exception.headers['date']
         try:
             remote_time = time.mktime(parsedate(date))
         except Exception as e:
-            LOG.warn("Failed to convert datetime '%s': %s", date, e)
+            LOG.warning("Failed to convert datetime '%s': %s", date, e)
             return

         skew = int(remote_time - time.time())
@@ -430,7 +431,7 @@ class OauthUrlHelper(object):
         old_skew = self.skew_data.get(host, 0)
         if abs(old_skew - skew) > self.skew_change_limit:
             self.update_skew_file(host, skew)
-            LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
+            LOG.warning("Setting oauth clockskew for %s to %d", host, skew)
         self.skew_data[host] = skew

         return
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index cfe5aa2f..88cb7f84 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -109,8 +109,9 @@ class UserDataProcessor(object):
                     ctype_orig = None
                     was_compressed = True
                 except util.DecompressionError as e:
-                    LOG.warn("Failed decompressing payload from %s of length"
-                             " %s due to: %s", ctype_orig, len(payload), e)
+                    LOG.warning("Failed decompressing payload from %s of"
+                                " length %s due to: %s",
+                                ctype_orig, len(payload), e)
                     continue

             # Attempt to figure out the payloads content-type
@@ -228,9 +229,9 @@ class UserDataProcessor(object):
                 if resp.ok():
                     content = resp.contents
                 else:
-                    LOG.warn(("Fetching from %s resulted in"
-                              " a invalid http code of %s"),
-                             include_url, resp.code)
+                    LOG.warning(("Fetching from %s resulted in"
+                                 " a invalid http code of %s"),
+                                include_url, resp.code)

             if content is not None:
                 new_msg = convert_string(content)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 6940850c..bfddca67 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -96,11 +96,11 @@ def _lsb_release(target=None):
             data[fmap[fname]] = val.strip()
         missing = [k for k in fmap.values() if k not in data]
         if len(missing):
-            LOG.warn("Missing fields in lsb_release --all output: %s",
-                     ','.join(missing))
+            LOG.warning("Missing fields in lsb_release --all output: %s",
+                        ','.join(missing))

     except ProcessExecutionError as err:
-        LOG.warn("Unable to get lsb_release --all: %s", err)
+        LOG.warning("Unable to get lsb_release --all: %s", err)
         data = dict((v, "UNAVAILABLE") for v in fmap.values())

     return data
@@ -590,7 +590,7 @@ def system_info():
         'release': platform.release(),
         'python': platform.python_version(),
         'uname': platform.uname(),
-        'dist': platform.linux_distribution(),
+        'dist': platform.linux_distribution(),  # pylint: disable=W1505
     }
@@ -865,7 +865,7 @@ def read_file_or_url(url, timeout=5, retries=10,
         url = "file://%s" % url
     if url.lower().startswith("file://"):
         if data:
-            LOG.warn("Unable to post data to file resource %s", url)
+            LOG.warning("Unable to post data to file resource %s", url)
         file_path = url[len("file://"):]
         try:
             contents = load_file(file_path, decode=False)
@@ -1279,7 +1279,7 @@ def get_cmdline():
             # replace nulls with space and drop trailing null
             cmdline = contents.replace("\x00", " ")[:-1]
         except Exception as e:
-            LOG.warn("failed reading /proc/1/cmdline: %s", e)
+            LOG.warning("failed reading /proc/1/cmdline: %s", e)
             cmdline = ""
     else:
         try:
@@ -1400,7 +1400,7 @@ def logexc(log, msg, *args):
     # or even desirable to have that much junk
     # coming out to a non-debug stream
     if msg:
-        log.warn(msg, *args)
+        log.warning(msg, *args)
     # Debug gets the full trace.  However, nose has a bug whereby its
     # logcapture plugin doesn't properly handle the case where there is no
     # actual exception.  To avoid tracebacks during the test suite then, we'll
@@ -2344,8 +2344,8 @@ def read_dmi_data(key):
     if dmidecode_path:
         return _call_dmidecode(key, dmidecode_path)

-    LOG.warn("did not find either path %s or dmidecode command",
-             DMI_SYS_PATH)
+    LOG.warning("did not find either path %s or dmidecode command",
+                DMI_SYS_PATH)
     return None
diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py
index 3206d4e9..f9f7a63c 100644
--- a/cloudinit/warnings.py
+++ b/cloudinit/warnings.py
@@ -130,10 +130,10 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
             os.path.join(_get_warn_dir(cfg), name),
             topline + "\n".join(fmtlines) + "\n" + topline)

-    LOG.warn(topline + "\n".join(fmtlines) + "\n" + closeline)
+    LOG.warning(topline + "\n".join(fmtlines) + "\n" + closeline)

     if sleep:
-        LOG.debug("sleeping %d seconds for warning '%s'" % (sleep, name))
+        LOG.debug("sleeping %d seconds for warning '%s'", sleep, name)
         time.sleep(sleep)

 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/__main__.py b/tests/cloud_tests/__main__.py
index ef7d1878..ed654ad3 100644
--- a/tests/cloud_tests/__main__.py
+++ b/tests/cloud_tests/__main__.py
@@ -38,7 +38,7 @@ def run(args):
     finally:
         # TODO: make this configurable via environ or cmdline
         if failed:
-            LOG.warn('some tests failed, leaving data in %s', args.data_dir)
+            LOG.warning('some tests failed, leaving data in %s', args.data_dir)
         else:
             shutil.rmtree(args.data_dir)
     return failed
diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
index b68cc98e..371b0444 100644
--- a/tests/cloud_tests/args.py
+++ b/tests/cloud_tests/args.py
@@ -94,7 +94,7 @@ def normalize_create_args(args):
     if os.path.exists(config.name_to_path(args.name)):
         msg = 'test: {} already exists'.format(args.name)
         if args.force:
-            LOG.warn('%s but ignoring due to --force', msg)
+            LOG.warning('%s but ignoring due to --force', msg)
         else:
             LOG.error(msg)
             return None
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index 68b47d7a..02fc0e52 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -45,7 +45,7 @@ def collect_test_data(args, snapshot, os_name, test_name):
     # if test is not enabled, skip and return 0 failures
     if not test_config.get('enabled', False):
-        LOG.warn('test config %s is not enabled, skipping', test_name)
+        LOG.warning('test config %s is not enabled, skipping', test_name)
         return ({}, 0)

     # create test instance
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index ef7d4e21..2a63550e 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -45,9 +45,9 @@ def verify_data(base_dir, tests):
         }

         for failure in res[test_name]['failures']:
-            LOG.warn('test case: %s failed %s.%s with: %s',
-                     test_name, failure['class'], failure['function'],
-                     failure['error'])
+            LOG.warning('test case: %s failed %s.%s with: %s',
+                        test_name, failure['class'], failure['function'],
+                        failure['error'])

     return res
@@ -80,7 +80,8 @@ def verify(args):
         if len(fail_list) == 0:
             LOG.info('test: %s passed all tests', test_name)
         else:
-            LOG.warn('test: %s failed %s tests', test_name, len(fail_list))
+            LOG.warning('test: %s failed %s tests', test_name,
+                        len(fail_list))
             failed += len(fail_list)

     # dump results
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index 95fc4659..82816e8a 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -293,9 +293,9 @@ class MetaDataHandler(object):
             else:
                 return "%s" % (PLACEMENT_CAPABILITIES.get(pentry, ''))
         else:
-            log.warn(("Did not implement action %s, "
-                      "returning empty response: %r"),
-                     action, NOT_IMPL_RESPONSE)
+            log.warning(("Did not implement action %s, "
+                         "returning empty response: %r"),
+                        action, NOT_IMPL_RESPONSE)
             return NOT_IMPL_RESPONSE

-- cgit v1.2.3
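The conversions in the commit above follow two rules of the standard logging module. Logger.warn is an undocumented, deprecated alias for Logger.warning, and pre-formatting the message with % or str.format() does work that logging can defer. A minimal sketch of the difference (not part of the patch series; the "failed posting event" message is just borrowed as an example):

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger("demo")

    event = "boot-finished"

    # Eager formatting: the message string is built by % before logging
    # ever sees it, even when the WARNING level is filtered out.
    LOG.warning("failed posting event: %s" % event)

    # Lazy formatting, the form this commit converges on: logging stores
    # the format string and arguments and interpolates only when a handler
    # actually emits the record. A mismatched format/argument pair is then
    # routed to Handler.handleError instead of raising at the call site.
    LOG.warning("failed posting event: %s", event)
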
From 068dec2f6694da263147a5a7fbc5e6d30f7d1e0d Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 20 Apr 2017 13:20:38 -0400
Subject: Fix growpart for some cases when booted with root=PARTUUID.

Growing the root partition would fail in either of two cases:

 a.) if the device /dev/root existed
 b.) the kernel command line had upper case letters in PARTUUID=

The kernel accepts an upper-case PARTUUID= value, but udev creates the
symlinks with lower case, so in that scenario we need to adjust to a
/dev/disk/by- path with lower case. The fix here addresses that, and
also fixes UUID= handling for the same lower-case issue.

LP: #1684869
---
 cloudinit/config/cc_growpart.py | 6 +++++-
 cloudinit/util.py               | 5 +++--
 2 files changed, 8 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index 089693e8..d2bc6e6c 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -252,9 +252,13 @@ def devent2dev(devent):
     container = util.is_container()

     # Ensure the path is a block device.
-    if (dev == "/dev/root" and not os.path.exists(dev) and not container):
+    if (dev == "/dev/root" and not container):
         dev = util.rootdev_from_cmdline(util.get_cmdline())
         if dev is None:
+            if os.path.exists(dev):
+                # if /dev/root exists, but we failed to convert
+                # that to a "real" /dev/ path device, then return it.
+                return dev
             raise ValueError("Unable to find device '/dev/root'")
     return dev
diff --git a/cloudinit/util.py b/cloudinit/util.py
index bfddca67..22af99dd 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2404,9 +2404,10 @@ def rootdev_from_cmdline(cmdline):
     if found.startswith("LABEL="):
         return "/dev/disk/by-label/" + found[len("LABEL="):]
     if found.startswith("UUID="):
-        return "/dev/disk/by-uuid/" + found[len("UUID="):]
+        return "/dev/disk/by-uuid/" + found[len("UUID="):].lower()
     if found.startswith("PARTUUID="):
-        disks_path = "/dev/disk/by-partuuid/" + found[len("PARTUUID="):]
+        disks_path = ("/dev/disk/by-partuuid/" +
+                      found[len("PARTUUID="):].lower())
         if os.path.exists(disks_path):
             return disks_path
         results = find_devs_with(found)

-- cgit v1.2.3
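For context, the by-partuuid mapping this commit fixes can be illustrated with a trimmed, hypothetical sketch of util.rootdev_from_cmdline(); the real helper also handles LABEL= and UUID= and falls back to find_devs_with() when the symlink is absent:

    def rootdev_from_cmdline(cmdline):
        # Simplified sketch, not the full cloud-init helper.
        found = None
        for tok in cmdline.split():
            if tok.startswith("root="):
                found = tok[len("root="):]
        if found is None:
            return None
        if found.startswith("/dev/"):
            return found
        if found.startswith("PARTUUID="):
            # The kernel accepts an upper-case PARTUUID= value, but udev
            # creates /dev/disk/by-partuuid/* links in lower case, hence
            # the .lower() this commit adds.
            return "/dev/disk/by-partuuid/" + found[len("PARTUUID="):].lower()
        return "/dev/" + found

    print(rootdev_from_cmdline("ro root=PARTUUID=30DD-F132 quiet"))
    # -> /dev/disk/by-partuuid/30dd-f132
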
From d16632ad8dfd1844d265d93ab00b54d419626019 Mon Sep 17 00:00:00 2001
From: Dylan Perry
Date: Fri, 7 Apr 2017 15:43:35 +1000
Subject: Fix yum repo config where keys contain array values

ConfigObj produces configuration files that are incompatible with yum
if multiple values are listed for a configuration key. Switch to the
builtin configparser (ConfigParser on Python 2), which correctly
handles this case.

Also add a test case for array values in the yum_repos definition.

LP: #1592150
---
 cloudinit/config/cc_yum_add_repo.py                  | 22 +++++----
 .../test_handler/test_handler_yum_add_repo.py        | 56 +++++++++++++++++++---
 2 files changed, 64 insertions(+), 14 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index a04e1b2a..6a42f499 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -32,7 +32,10 @@ entry, the config entry will be skipped.

 import os

-import configobj
+try:
+    from configparser import ConfigParser
+except ImportError:
+    from ConfigParser import ConfigParser
 import six

 from cloudinit import util
@@ -53,7 +56,7 @@ def _format_repo_value(val):
     if isinstance(val, (list, tuple)):
         # Can handle 'lists' in certain cases
         # See: https://linux.die.net/man/5/yum.conf
-        return "\n    ".join([_format_repo_value(v) for v in val])
+        return "\n".join([_format_repo_value(v) for v in val])
     if not isinstance(val, six.string_types):
         return str(val)
     return val
@@ -62,16 +65,19 @@ def _format_repo_value(val):
 # TODO(harlowja): move to distro?
 # See man yum.conf
 def _format_repository_config(repo_id, repo_config):
-    to_be = configobj.ConfigObj()
-    to_be[repo_id] = {}
+    to_be = ConfigParser()
+    to_be.add_section(repo_id)
     # Do basic translation of the items -> values
     for (k, v) in repo_config.items():
         # For now assume that people using this know
         # the format of yum and don't verify keys/values further
-        to_be[repo_id][k] = _format_repo_value(v)
-    lines = to_be.write()
-    lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822()))
-    return "\n".join(lines)
+        to_be.set(repo_id, k, _format_repo_value(v))
+    to_be_stream = six.StringIO()
+    to_be.write(to_be_stream)
+    to_be_stream.seek(0)
+    lines = to_be_stream.readlines()
+    lines.insert(0, "# Created by cloud-init on %s\n" % (util.time_rfc2822()))
+    return "".join(lines)


 def handle(name, cfg, _cloud, log, _args):
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
index 3feba86c..4815bdb6 100644
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py
@@ -5,10 +5,13 @@ from cloudinit import util

 from .. import helpers

-import configobj
+try:
+    from configparser import ConfigParser
+except ImportError:
+    from ConfigParser import ConfigParser
 import logging
 import shutil
-from six import BytesIO
+from six import StringIO
 import tempfile

 LOG = logging.getLogger(__name__)
@@ -54,9 +57,9 @@ class TestConfig(helpers.FilesystemMockingTestCase):
         }
         self.patchUtils(self.tmp)
         cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
-        contents = util.load_file("/etc/yum.repos.d/epel_testing.repo",
-                                  decode=False)
-        contents = configobj.ConfigObj(BytesIO(contents))
+        contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
+        parser = ConfigParser()
+        parser.readfp(StringIO(contents))
         expected = {
             'epel_testing': {
                 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
@@ -67,6 +70,47 @@ class TestConfig(helpers.FilesystemMockingTestCase):
                 'gpgcheck': '1',
             }
         }
-        self.assertEqual(expected, dict(contents))
+        for section in expected:
+            self.assertTrue(parser.has_section(section),
+                            "Contains section {}".format(section))
+            for k, v in expected[section].items():
+                self.assertEqual(parser.get(section, k), v)
+
+    def test_write_config_array(self):
+        cfg = {
+            'yum_repos': {
+                'puppetlabs-products': {
+                    'name': 'Puppet Labs Products El 6 - $basearch',
+                    'baseurl':
+                        'http://yum.puppetlabs.com/el/6/products/$basearch',
+                    'gpgkey': [
+                        'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs',
+                        'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet',
+                    ],
+                    'enabled': True,
+                    'gpgcheck': True,
+                }
+            }
+        }
+        self.patchUtils(self.tmp)
+        cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
+        contents = util.load_file("/etc/yum.repos.d/puppetlabs_products.repo")
+        parser = ConfigParser()
+        parser.readfp(StringIO(contents))
+        expected = {
+            'puppetlabs_products': {
+                'name': 'Puppet Labs Products El 6 - $basearch',
+                'baseurl': 'http://yum.puppetlabs.com/el/6/products/$basearch',
+                'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n'
+                          'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet',
+                'enabled': '1',
+                'gpgcheck': '1',
+            }
+        }
+        for section in expected:
+            self.assertTrue(parser.has_section(section),
+                            "Contains section {}".format(section))
+            for k, v in expected[section].items():
+                self.assertEqual(parser.get(section, k), v)

 # vi: ts=4 expandtab

-- cgit v1.2.3
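A minimal sketch (Python 3; not part of the patch) of why the switch matters: ConfigParser writes a value containing embedded newlines as indented continuation lines, which yum's INI parser accepts, whereas ConfigObj would instead quote the multi-line value in a form yum cannot read.

    import io
    from configparser import ConfigParser  # the ConfigParser module on Python 2

    parser = ConfigParser()
    parser.add_section('puppetlabs_products')
    # A multi-valued key, as produced by _format_repo_value() for a list.
    parser.set('puppetlabs_products', 'gpgkey',
               'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs\n'
               'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet')

    buf = io.StringIO()
    parser.write(buf)
    print(buf.getvalue())
    # [puppetlabs_products]
    # gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppetlabs
    #     file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puppet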