51 files changed, 239 insertions, 202 deletions
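Two patterns account for nearly every hunk below: the deprecated Logger.warn() alias is replaced with the canonical Logger.warning(), and messages that were pre-formatted with the % operator now pass their arguments to the logging call so interpolation is deferred. A minimal sketch of both, with illustrative values that are not part of the patch:

    import logging

    logging.basicConfig(level=logging.WARNING)
    LOG = logging.getLogger(__name__)

    # Old style: deprecated alias (pylint W1505, deprecated-method) plus
    # eager %-formatting, which builds the string even if the record
    # would be discarded by the level filter.
    LOG.warn("No template found at %s" % "/etc/cloud/templates/hosts.tmpl")

    # New style: canonical method name with lazy interpolation; the
    # string is only formatted when the record is actually emitted.
    LOG.warning("No template found at %s", "/etc/cloud/templates/hosts.tmpl")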
@@ -6,8 +6,29 @@ jobs=4

 [MESSAGES CONTROL]

-# Errors only
-disable=C, F, I, R, W
+# Errors and warnings with some filtered:
+# W0105(pointless-string-statement)
+# W0107(unnecessary-pass)
+# W0201(attribute-defined-outside-init)
+# W0212(protected-access)
+# W0221(arguments-differ)
+# W0222(signature-differs)
+# W0223(abstract-method)
+# W0231(super-init-not-called)
+# W0311(bad-indentation)
+# W0511(fixme)
+# W0602(global-variable-not-assigned)
+# W0603(global-statement)
+# W0611(unused-import)
+# W0612(unused-variable)
+# W0613(unused-argument)
+# W0621(redefined-outer-name)
+# W0622(redefined-builtin)
+# W0631(undefined-loop-variable)
+# W0703(broad-except)
+# W1401(anomalous-backslash-in-string)
+
+disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401

 [REPORTS]

@@ -25,7 +46,7 @@ reports=no
 # (useful for modules/projects where namespaces are manipulated during runtime
 # and thus existing member attributes cannot be deduced by static analysis. It
 # supports qualified module names, as well as Unix pattern matching.
-ignored-modules=six.moves,pkg_resources
+ignored-modules=six.moves,pkg_resources,httplib,http.client

 # List of class names for which member attributes should not be checked (useful
 # for classes with dynamically set attributes). This supports the use of
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index b93a42ea..d8a9fc86 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -56,7 +56,8 @@ class Cloud(object):
     def get_template_filename(self, name):
         fn = self.paths.template_tpl % (name)
         if not os.path.isfile(fn):
-            LOG.warn("No template found at %s for template named %s", fn, name)
+            LOG.warning("No template found at %s for template named %s",
+                        fn, name)
             return None
         return fn
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index fd221323..26cc2654 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -405,7 +405,8 @@ def main_init(name, args):
             errfmt_orig = errfmt
             (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
             if outfmt_orig != outfmt or errfmt_orig != errfmt:
-                LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
+                LOG.warning("Stdout, stderr changing to (%s, %s)",
+                            outfmt, errfmt)
                 (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
         except Exception:
             util.logexc(LOG, "Failed to re-adjust output redirection!")
@@ -427,15 +428,15 @@ def di_report_warn(datasource, cfg):

     dicfg = cfg.get('di_report', {})
     if not isinstance(dicfg, dict):
-        LOG.warn("di_report config not a dictionary: %s", dicfg)
+        LOG.warning("di_report config not a dictionary: %s", dicfg)
         return

     dslist = dicfg.get('datasource_list')
     if dslist is None:
-        LOG.warn("no 'datasource_list' found in di_report.")
+        LOG.warning("no 'datasource_list' found in di_report.")
         return
     elif not isinstance(dslist, list):
-        LOG.warn("di_report/datasource_list not a list: %s", dslist)
+        LOG.warning("di_report/datasource_list not a list: %s", dslist)
         return

     # ds.__module__ is like cloudinit.sources.DataSourceName
@@ -444,8 +445,8 @@ def di_report_warn(datasource, cfg):
     if modname.startswith(sources.DS_PREFIX):
         modname = modname[len(sources.DS_PREFIX):]
     else:
-        LOG.warn("Datasource '%s' came from unexpected module '%s'.",
-                 datasource, modname)
+        LOG.warning("Datasource '%s' came from unexpected module '%s'.",
+                    datasource, modname)

     if modname in dslist:
         LOG.debug("used datasource '%s' from '%s' was in di_report's list: %s",
@@ -571,10 +572,10 @@ def main_single(name, args):
                                             mod_args,
                                             mod_freq)
     if failures:
-        LOG.warn("Ran %s but it failed!", mod_name)
+        LOG.warning("Ran %s but it failed!", mod_name)
         return 1
     elif not which_ran:
-        LOG.warn("Did not run %s, does it exist?", mod_name)
+        LOG.warning("Did not run %s, does it exist?", mod_name)
         return 1
     else:
         # Guess it worked
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
index 57e2a44d..0ef9a748 100644
--- a/cloudinit/config/__init__.py
+++ b/cloudinit/config/__init__.py
@@ -37,7 +37,7 @@ def fixup_module(mod, def_freq=PER_INSTANCE):
     else:
         freq = mod.frequency
     if freq and freq not in FREQUENCIES:
-        LOG.warn("Module %s has an unknown frequency %s", mod, freq)
+        LOG.warning("Module %s has an unknown frequency %s", mod, freq)
     if not hasattr(mod, 'distros'):
         setattr(mod, 'distros', [])
     if not hasattr(mod, 'osfamilies'):
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 06804e85..7e751776 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -347,8 +347,8 @@ def dpkg_reconfigure(packages, target=None):
             unhandled.append(pkg)

     if len(unhandled):
-        LOG.warn("The following packages were installed and preseeded, "
-                 "but cannot be unconfigured: %s", unhandled)
+        LOG.warning("The following packages were installed and preseeded, "
+                    "but cannot be unconfigured: %s", unhandled)

     if len(to_config):
         util.subp(['dpkg-reconfigure', '--frontend=noninteractive'] +
@@ -441,7 +441,7 @@ def rename_apt_lists(new_mirrors, target=None):
             os.rename(filename, newname)
         except OSError:
             # since this is a best effort task, warn with but don't fail
-            LOG.warn("Failed to rename apt list:", exc_info=True)
+            LOG.warning("Failed to rename apt list:", exc_info=True)


 def mirror_to_placeholder(tmpl, mirror, placeholder):
@@ -449,7 +449,7 @@ def mirror_to_placeholder(tmpl, mirror, placeholder):
        replace the specified mirror in a template with a placeholder string
        Checks for existance of the expected mirror and warns if not found"""
     if mirror not in tmpl:
-        LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl)
+        LOG.warning("Expected mirror '%s' not found in: %s", mirror, tmpl)
     return tmpl.replace(mirror, placeholder)
@@ -525,7 +525,8 @@ def generate_sources_list(cfg, release, mirrors, cloud):
         if not template_fn:
             template_fn = cloud.get_template_filename('sources.list')
             if not template_fn:
-                LOG.warn("No template found, not rendering /etc/apt/sources.list")
+                LOG.warning("No template found, "
+                            "not rendering /etc/apt/sources.list")
                 return

     tmpl = util.load_file(template_fn)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index f39f0815..f49386e3 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -181,7 +181,7 @@ def update_fs_setup_devices(disk_setup, tformer):
     # update it with the response from 'tformer'
     for definition in disk_setup:
         if not isinstance(definition, dict):
-            LOG.warn("entry in disk_setup not a dict: %s", definition)
+            LOG.warning("entry in disk_setup not a dict: %s", definition)
             continue

         origname = definition.get('device')
@@ -279,7 +279,7 @@ def is_device_valid(name, partition=False):
     try:
         d_type = device_type(name)
     except Exception:
-        LOG.warn("Query against device %s failed" % name)
+        LOG.warning("Query against device %s failed", name)
         return False

     if partition and d_type == 'part':
@@ -372,7 +372,7 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None,
     if not raw_device_used:
         return (device, False)

-    LOG.warn("Failed to find device during available device search.")
+    LOG.warning("Failed to find device during available device search.")
     return (None, False)
@@ -638,7 +638,7 @@ def purge_disk(device):
         if d['type'] not in ["disk", "crypt"]:
             wipefs_cmd = [WIPEFS_CMD, "--all", "/dev/%s" % d['name']]
             try:
-                LOG.info("Purging filesystem on /dev/%s" % d['name'])
+                LOG.info("Purging filesystem on /dev/%s", d['name'])
                 util.subp(wipefs_cmd)
             except Exception:
                 raise Exception("Failed FS purge of /dev/%s" % d['name'])
@@ -700,7 +700,7 @@ def exec_mkpart_gpt(device, layout):
                 [SGDISK_CMD, '-t', '{}:{}'.format(index, partition_type),
                  device])
     except Exception:
-        LOG.warn("Failed to partition device %s" % device)
+        LOG.warning("Failed to partition device %s", device)
         raise

     read_parttbl(device)
@@ -736,7 +736,7 @@ def mkpart(device, definition):
     # ensure that we get a real device rather than a symbolic link
     device = os.path.realpath(device)

-    LOG.debug("Checking values for %s definition" % device)
+    LOG.debug("Checking values for %s definition", device)
     overwrite = definition.get('overwrite', False)
     layout = definition.get('layout', False)
     table_type = definition.get('table_type', 'mbr')
@@ -766,7 +766,7 @@ def mkpart(device, definition):
     LOG.debug("Checking if device is safe to partition")
     if not overwrite and (is_disk_used(device) or is_filesystem(device)):
-        LOG.debug("Skipping partitioning on configured device %s" % device)
+        LOG.debug("Skipping partitioning on configured device %s", device)
         return

     LOG.debug("Checking for device size")
@@ -774,7 +774,7 @@ def mkpart(device, definition):
     LOG.debug("Calculating partition layout")
     part_definition = get_partition_layout(table_type, device_size, layout)
-    LOG.debug(" Layout is: %s" % part_definition)
+    LOG.debug(" Layout is: %s", part_definition)

     LOG.debug("Creating partition table on %s", device)
     exec_mkpart(table_type, device, part_definition)
@@ -799,7 +799,7 @@ def lookup_force_flag(fs):
     if fs.lower() in flags:
         return flags[fs]

-    LOG.warn("Force flag for %s is unknown." % fs)
+    LOG.warning("Force flag for %s is unknown.", fs)
     return ''
@@ -858,7 +858,7 @@ def mkfs(fs_cfg):
             LOG.debug("Device %s has required file system", device)
             return
         else:
-            LOG.warn("Destroying filesystem on %s", device)
+            LOG.warning("Destroying filesystem on %s", device)
     else:
         LOG.debug("Device %s is cleared for formating", device)
@@ -883,14 +883,14 @@ def mkfs(fs_cfg):
             return

     if not reuse and fs_replace and device:
-        LOG.debug("Replacing file system on %s as instructed." % device)
+        LOG.debug("Replacing file system on %s as instructed.", device)

     if not device:
         LOG.debug("No device aviable that matches request. "
                   "Skipping fs creation for %s", fs_cfg)
         return
     elif not partition or str(partition).lower() == 'none':
-        LOG.debug("Using the raw device to place filesystem %s on" % label)
+        LOG.debug("Using the raw device to place filesystem %s on", label)
     else:
         LOG.debug("Error in device identification handling.")
@@ -901,7 +901,7 @@ def mkfs(fs_cfg):

     # Make sure the device is defined
     if not device:
-        LOG.warn("Device is not known: %s", device)
+        LOG.warning("Device is not known: %s", device)
         return

     # Check that we can create the FS
@@ -923,8 +923,8 @@ def mkfs(fs_cfg):
         mkfs_cmd = util.which("mk%s" % fs_type)

     if not mkfs_cmd:
-        LOG.warn("Cannot create fstype '%s'. No mkfs.%s command", fs_type,
-                 fs_type)
+        LOG.warning("Cannot create fstype '%s'. No mkfs.%s command",
+                    fs_type, fs_type)
         return

     fs_cmd = [mkfs_cmd, device]
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
index f0cda3d5..0a135bbe 100644
--- a/cloudinit/config/cc_fan.py
+++ b/cloudinit/config/cc_fan.py
@@ -64,7 +64,7 @@ def stop_update_start(service, config_file, content, systemd=False):
         try:
             return util.subp(cmd, capture=True)
         except util.ProcessExecutionError as e:
-            LOG.warn("failed: %s (%s): %s", service, cmd, e)
+            LOG.warning("failed: %s (%s): %s", service, cmd, e)
             return False

     stop_failed = not run(cmds['stop'], msg='stop %s' % service)
@@ -74,7 +74,7 @@ def stop_update_start(service, config_file, content, systemd=False):
     ret = run(cmds['start'], msg='start %s' % service)
     if ret and stop_failed:
-        LOG.warn("success: %s started", service)
+        LOG.warning("success: %s started", service)

     if 'enable' in cmds:
         ret = run(cmds['enable'], msg='enable %s' % service)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 5b630f8b..f14a4fc5 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -216,8 +216,9 @@ def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
         else:
             pinfo[k] = v

-    LOG.debug("suggest %(size)s swap for %(mem)s memory with '%(avail)s'"
-              " disk given max=%(max_in)s [max=%(max)s]'" % pinfo)
+    LOG.debug("suggest %s swap for %s memory with '%s'"
+              " disk given max=%s [max=%s]'", pinfo['size'], pinfo['mem'],
+              pinfo['avail'], pinfo['max_in'], pinfo['max'])
     return size
@@ -266,7 +267,7 @@ def handle_swapcfg(swapcfg):
         return None or (filename, size)
     """
     if not isinstance(swapcfg, dict):
-        LOG.warn("input for swap config was not a dict.")
+        LOG.warning("input for swap config was not a dict.")
         return None

     fname = swapcfg.get('filename', '/swap.img')
@@ -289,7 +290,8 @@ def handle_swapcfg(swapcfg):
                 return fname
             LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
         except Exception:
-            LOG.warn("swap file %s existed. Error reading /proc/swaps", fname)
+            LOG.warning("swap file %s existed. Error reading /proc/swaps",
+                        fname)
             return fname

     try:
@@ -300,7 +302,7 @@ def handle_swapcfg(swapcfg):
         return setup_swapfile(fname=fname, size=size, maxsize=maxsize)

     except Exception as e:
-        LOG.warn("failed to setup swap: %s", e)
+        LOG.warning("failed to setup swap: %s", e)

     return None
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index 9c5cd1fe..2548d1f1 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -77,7 +77,7 @@ def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
         params['options'] = {}

     params['flags'] = flags
-    LOG.debug("Writing resolv.conf from template %s" % template_fn)
+    LOG.debug("Writing resolv.conf from template %s", template_fn)
     templater.render_to_file(template_fn, target_fname, params)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 50316214..50ff9e35 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -252,7 +252,8 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
     for cur_pos, ent in enumerate(configs):
         if isinstance(ent, dict):
             if "content" not in ent:
-                LOG.warn("No 'content' entry in config entry %s", cur_pos + 1)
+                LOG.warning("No 'content' entry in config entry %s",
+                            cur_pos + 1)
                 continue
             content = ent['content']
             filename = ent.get("filename", def_fname)
@@ -262,7 +263,7 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):

         filename = filename.strip()
         if not filename:
-            LOG.warn("Entry %s has an empty filename", cur_pos + 1)
+            LOG.warning("Entry %s has an empty filename", cur_pos + 1)
             continue

         filename = os.path.join(cfg_dir, filename)
@@ -389,7 +390,7 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
         try:
             lines.append(str(parse_remotes_line(line, name=name)))
         except ValueError as e:
-            LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
+            LOG.warning("failed loading remote %s: %s [%s]", name, line, e)
     if footer is not None:
         lines.append(footer)
     return '\n'.join(lines) + "\n"
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 6ea81b84..a9682f19 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -283,8 +283,8 @@ def handle(name, cfg, cloud, log, args):
                 render_snap_op(**pkg_op)
             except Exception as e:
                 fails.append((pkg_op, e,))
-                LOG.warn("'%s' failed for '%s': %s",
-                         pkg_op['op'], pkg_op['name'], e)
+                LOG.warning("'%s' failed for '%s': %s",
+                            pkg_op['op'], pkg_op['name'], e)

     # Default to disabling SSH
     ssh_enabled = mycfg.get('ssh_enabled', "auto")
@@ -303,7 +303,7 @@ def handle(name, cfg, cloud, log, args):
             LOG.debug("Enabling SSH, password authentication requested")
             ssh_enabled = True
     elif ssh_enabled not in (True, False):
-        LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled)
+        LOG.warning("Unknown value '%s' in ssh_enabled", ssh_enabled)

     disable_enable_ssh(ssh_enabled)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 803ac74e..28650b88 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -143,9 +143,9 @@ class Distro(object):
     def _apply_network_from_network_config(self, netconfig, bring_up=True):
         distro = self.__class__
-        LOG.warn("apply_network_config is not currently implemented "
-                 "for distribution '%s'. Attempting to use apply_network",
-                 distro)
+        LOG.warning("apply_network_config is not currently implemented "
+                    "for distribution '%s'. Attempting to use apply_network",
+                    distro)
         header = '\n'.join([
             "# Converted from network_config for distro %s" % distro,
             "# Implmentation of _write_network_config is needed."
@@ -335,7 +335,8 @@ class Distro(object):
         try:
             (_out, err) = util.subp(cmd)
             if len(err):
-                LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+                LOG.warning("Running %s resulted in stderr output: %s",
+                            cmd, err)
             return True
         except util.ProcessExecutionError:
             util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -358,7 +359,7 @@ class Distro(object):
         Add a user to the system using standard GNU tools
         """
         if util.is_user(name):
-            LOG.info("User %s already exists, skipping." % name)
+            LOG.info("User %s already exists, skipping.", name)
             return

         if 'create_groups' in kwargs:
@@ -520,9 +521,9 @@ class Distro(object):
                 keys = list(keys.values())
         if keys is not None:
             if not isinstance(keys, (tuple, list, set)):
-                LOG.warn("Invalid type '%s' detected for"
-                         " 'ssh_authorized_keys', expected list,"
-                         " string, dict, or set.", type(keys))
+                LOG.warning("Invalid type '%s' detected for"
+                            " 'ssh_authorized_keys', expected list,"
+                            " string, dict, or set.", type(keys))
             else:
                 keys = set(keys) or []
                 ssh_util.setup_user_keys(keys, name, options=None)
@@ -595,7 +596,7 @@ class Distro(object):
                      "#includedir %s" % (path), '']
             sudoers_contents = "\n".join(lines)
             util.append_file(sudo_base, sudoers_contents)
-            LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base))
+            LOG.debug("Added '#includedir %s' to %s", path, sudo_base)
         except IOError as e:
             util.logexc(LOG, "Failed to write %s", sudo_base)
             raise e
@@ -647,11 +648,11 @@ class Distro(object):

         # Check if group exists, and then add it doesn't
         if util.is_group(name):
-            LOG.warn("Skipping creation of existing group '%s'" % name)
+            LOG.warning("Skipping creation of existing group '%s'", name)
         else:
             try:
                 util.subp(group_add_cmd)
-                LOG.info("Created new group %s" % name)
+                LOG.info("Created new group %s", name)
             except Exception:
                 util.logexc(LOG, "Failed to create group %s", name)
@@ -659,12 +660,12 @@ class Distro(object):
         if len(members) > 0:
             for member in members:
                 if not util.is_user(member):
-                    LOG.warn("Unable to add group member '%s' to group '%s'"
-                             "; user does not exist.", member, name)
+                    LOG.warning("Unable to add group member '%s' to group '%s'"
+                                "; user does not exist.", member, name)
                     continue

                 util.subp(['usermod', '-a', '-G', name, member])
-                LOG.info("Added user '%s' to group '%s'" % (member, name))
+                LOG.info("Added user '%s' to group '%s'", member, name)


 def _get_package_mirror_info(mirror_info, data_source=None,
@@ -708,7 +709,7 @@ def _get_package_mirror_info(mirror_info, data_source=None,
         if found:
             results[name] = found

-    LOG.debug("filtered distro mirror info: %s" % results)
+    LOG.debug("filtered distro mirror info: %s", results)

     return results
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 64b8c1fb..75d46201 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -83,7 +83,8 @@ class Distro(distros.Distro):
         try:
             (_out, err) = util.subp(cmd)
             if len(err):
-                LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+                LOG.warning("Running %s resulted in stderr output: %s",
+                            cmd, err)
         except util.ProcessExecutionError:
             util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -94,7 +95,8 @@ class Distro(distros.Distro):
         try:
             (_out, err) = util.subp(cmd)
             if len(err):
-                LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+                LOG.warning("Running %s resulted in stderr output: %s",
+                            cmd, err)
             return True
         except util.ProcessExecutionError:
             util.logexc(LOG, "Running interface command %s failed", cmd)
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 16f8d955..d06d46a6 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -223,6 +223,6 @@ def _maybe_remove_legacy_eth0(path="/etc/network/interfaces.d/eth0.cfg"):
     except Exception:
         msg = bmsg + " %s exists, but could not be read." % path

-    LOG.warn(msg)
+    LOG.warning(msg)

 # vi: ts=4 expandtab
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index a70ee45b..183e4452 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -148,7 +148,7 @@ class Distro(distros.Distro):
     def create_group(self, name, members):
         group_add_cmd = ['pw', '-n', name]
         if util.is_group(name):
-            LOG.warn("Skipping creation of existing group '%s'", name)
+            LOG.warning("Skipping creation of existing group '%s'", name)
         else:
             try:
                 util.subp(group_add_cmd)
@@ -160,8 +160,8 @@ class Distro(distros.Distro):
         if len(members) > 0:
             for member in members:
                 if not util.is_user(member):
-                    LOG.warn("Unable to add group member '%s' to group '%s'"
-                             "; user does not exist.", member, name)
+                    LOG.warning("Unable to add group member '%s' to group '%s'"
+                                "; user does not exist.", member, name)
                     continue
                 try:
                     util.subp(['pw', 'usermod', '-n', name, '-G', member])
@@ -369,7 +369,7 @@ class Distro(distros.Distro):
             # OS. This is just fine.
             (_out, err) = util.subp(cmd, rcs=[0, 1])
             if len(err):
-                LOG.warn("Error running %s: %s", cmd, err)
+                LOG.warning("Error running %s: %s", cmd, err)

     def install_packages(self, pkglist):
         self.update_package_sources()
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 83fb56ff..0ad2f032 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -96,8 +96,8 @@ class Distro(distros.Distro):
             try:
                 (_out, err) = util.subp(cmd)
                 if len(err):
-                    LOG.warn("Running %s resulted in stderr output: %s",
-                             cmd, err)
+                    LOG.warning("Running %s resulted in stderr output: %s",
+                                cmd, err)
             except util.ProcessExecutionError:
                 util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -121,7 +121,8 @@ class Distro(distros.Distro):
         try:
             (_out, err) = util.subp(cmd)
             if len(err):
-                LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+                LOG.warning("Running %s resulted in stderr output: %s",
+                            cmd, err)
             return True
         except util.ProcessExecutionError:
             util.logexc(LOG, "Running interface command %s failed", cmd)
@@ -138,8 +139,8 @@ class Distro(distros.Distro):
             try:
                 (_out, err) = util.subp(cmd)
                 if len(err):
-                    LOG.warn("Running %s resulted in stderr output: %s", cmd,
-                             err)
+                    LOG.warning("Running %s resulted in stderr output: %s",
+                                cmd, err)
             except util.ProcessExecutionError:
                 util.logexc(LOG, "Running interface command %s failed", cmd)
                 return False
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index d1f8a042..a62055ae 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -81,9 +81,9 @@ class ResolvConf(object):
         if len(new_ns) == len(current_ns):
             return current_ns
         if len(current_ns) >= 3:
-            LOG.warn("ignoring nameserver %r: adding would "
-                     "exceed the maximum of "
-                     "'3' name servers (see resolv.conf(5))" % (ns))
+            LOG.warning("ignoring nameserver %r: adding would "
+                        "exceed the maximum of "
+                        "'3' name servers (see resolv.conf(5))", ns)
             return current_ns[:3]
         self._remove_option('nameserver')
         for n in new_ns:
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
index 53a0eafb..9378dd78 100755
--- a/cloudinit/distros/ug_util.py
+++ b/cloudinit/distros/ug_util.py
@@ -214,8 +214,8 @@ def normalize_users_groups(cfg, distro):
             'name': old_user,
         }
         if not isinstance(old_user, dict):
-            LOG.warn(("Format for 'user' key must be a string or "
-                      "dictionary and not %s"), type_utils.obj_name(old_user))
+            LOG.warning(("Format for 'user' key must be a string or dictionary"
+                         " and not %s"), type_utils.obj_name(old_user))
             old_user = {}

     # If no old user format, then assume the distro
@@ -227,9 +227,9 @@ def normalize_users_groups(cfg, distro):
     try:
         distro_user_config = distro.get_default_user()
     except NotImplementedError:
-        LOG.warn(("Distro has not implemented default user "
-                  "access. No distribution provided default user"
-                  " will be normalized."))
+        LOG.warning(("Distro has not implemented default user "
+                     "access. No distribution provided default user"
+                     " will be normalized."))

     # Merge the old user (which may just be an empty dict when not
     # present with the distro provided default user configuration so
@@ -239,9 +239,9 @@ def normalize_users_groups(cfg, distro):

     base_users = cfg.get('users', [])
     if not isinstance(base_users, (list, dict) + six.string_types):
-        LOG.warn(("Format for 'users' key must be a comma separated string"
-                  " or a dictionary or a list and not %s"),
-                 type_utils.obj_name(base_users))
+        LOG.warning(("Format for 'users' key must be a comma separated string"
+                     " or a dictionary or a list and not %s"),
+                    type_utils.obj_name(base_users))
         base_users = []

     if old_user:
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 13691549..723d6bd6 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -38,8 +38,8 @@ class MetadataLeafDecoder(object):
             # Assume it's json, unless it fails parsing...
             return json.loads(blob)
         except (ValueError, TypeError) as e:
-            LOG.warn("Field %s looked like a json object, but it was"
-                     " not: %s", field, e)
+            LOG.warning("Field %s looked like a json object, but it"
+                        " was not: %s", field, e)
         if blob.find("\n") != -1:
             return blob.splitlines()
         return blob
@@ -125,7 +125,8 @@ class MetadataMaterializer(object):
             joined.update(child_contents)
         for field in leaf_contents.keys():
             if field in joined:
-                LOG.warn("Duplicate key found in results from %s", base_url)
+                LOG.warning("Duplicate key found in results from %s",
+                            base_url)
             else:
                 joined[field] = leaf_contents[field]
         return joined
diff --git a/cloudinit/gpg.py b/cloudinit/gpg.py
index 70c620de..d58d73e0 100644
--- a/cloudinit/gpg.py
+++ b/cloudinit/gpg.py
@@ -43,7 +43,7 @@ def delete_key(key):
         util.subp(["gpg", "--batch", "--yes", "--delete-keys", key],
                   capture=True)
     except util.ProcessExecutionError as error:
-        LOG.warn('Failed delete key "%s": %s', key, error)
+        LOG.warning('Failed delete key "%s": %s', key, error)


 def getkeybyid(keyid, keyserver='keyserver.ubuntu.com'):
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 1362db6e..c3576c04 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -246,7 +246,7 @@ def fixup_handler(mod, def_freq=PER_INSTANCE):
     else:
         freq = mod.frequency
     if freq and freq not in FREQUENCIES:
-        LOG.warn("Handler %s has an unknown frequency %s", mod, freq)
+        LOG.warning("Handler %s has an unknown frequency %s", mod, freq)
     return mod
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 7435d58d..f01021aa 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -126,11 +126,11 @@ class FileSemaphores(object):
         # this case could happen if the migrator module hadn't run yet
         # but the item had run before we did canon_sem_name.
         if cname != name and os.path.exists(self._get_path(name, freq)):
-            LOG.warn("%s has run without canonicalized name [%s].\n"
-                     "likely the migrator has not yet run. "
-                     "It will run next boot.\n"
-                     "run manually with: cloud-init single --name=migrator"
-                     % (name, cname))
+            LOG.warning("%s has run without canonicalized name [%s].\n"
+                        "likely the migrator has not yet run. "
+                        "It will run next boot.\n"
+                        "run manually with: cloud-init single --name=migrator",
+                        name, cname)
             return True
         return False
@@ -375,8 +375,8 @@ class Paths(object):
     def get_ipath(self, name=None):
         ipath = self._get_ipath(name)
         if not ipath:
-            LOG.warn(("No per instance data available, "
-                      "is there an datasource/iid set?"))
+            LOG.warning(("No per instance data available, "
+                         "is there an datasource/iid set?"))
             return None
         else:
             return ipath
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 692b6007..db3c3579 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -242,8 +242,8 @@ class NetworkStateInterpreter(object):
                 if not skip_broken:
                     raise
                 else:
-                    LOG.warn("Skipping invalid command: %s", command,
-                             exc_info=True)
+                    LOG.warning("Skipping invalid command: %s", command,
+                                exc_info=True)
                     LOG.debug(self.dump_network_state())

     def parse_config_v2(self, skip_broken=True):
@@ -262,8 +262,8 @@ class NetworkStateInterpreter(object):
                 if not skip_broken:
                     raise
                 else:
-                    LOG.warn("Skipping invalid command: %s", command,
-                             exc_info=True)
+                    LOG.warning("Skipping invalid command: %s", command,
+                                exc_info=True)
                     LOG.debug(self.dump_network_state())

     @ensure_command_keys(['name'])
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index b90bc191..4066076c 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -37,7 +37,7 @@ class LogHandler(ReportingHandler):
             try:
                 level = getattr(logging, level.upper())
             except Exception:
-                LOG.warn("invalid level '%s', using WARN", input_level)
+                LOG.warning("invalid level '%s', using WARN", input_level)
                 level = logging.WARN
         self.level = level
@@ -82,7 +82,7 @@ class WebHookHandler(ReportingHandler):
                 timeout=self.timeout,
                 retries=self.retries, ssl_details=self.ssl_details)
         except Exception:
-            LOG.warn("failed posting event: %s" % event.as_string())
+            LOG.warning("failed posting event: %s", event.as_string())


 available_handlers = DictRegistry()
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 8528fa10..ed1d691a 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -181,7 +181,7 @@ class DataSourceAltCloud(sources.DataSource):
         try:
             cmd = CMD_PROBE_FLOPPY
             (cmd_out, _err) = util.subp(cmd)
-            LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+            LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
         except ProcessExecutionError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
             return False
@@ -196,7 +196,7 @@ class DataSourceAltCloud(sources.DataSource):
             cmd = CMD_UDEVADM_SETTLE
             cmd.append('--exit-if-exists=' + floppy_dev)
             (cmd_out, _err) = util.subp(cmd)
-            LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
+            LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
         except ProcessExecutionError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
             return False
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 48a3e1df..04358b73 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -116,7 +116,7 @@ class DataSourceAzureNet(sources.DataSource):
         # the metadata and "bounce" the network to force DDNS to update via
         # dhclient
         azure_hostname = self.metadata.get('local-hostname')
-        LOG.debug("Hostname in metadata is {}".format(azure_hostname))
+        LOG.debug("Hostname in metadata is %s", azure_hostname)
         hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']

         with temporary_hostname(azure_hostname, self.ds_cfg,
@@ -132,7 +132,7 @@ class DataSourceAzureNet(sources.DataSource):
                     cfg=cfg,
                     prev_hostname=previous_hostname)
             except Exception as e:
-                LOG.warn("Failed publishing hostname: %s", e)
+                LOG.warning("Failed publishing hostname: %s", e)
                 util.logexc(LOG, "handling set_hostname failed")

     def get_metadata_from_agent(self):
@@ -168,7 +168,7 @@ class DataSourceAzureNet(sources.DataSource):
                             func=wait_for_files,
                             args=(fp_files,))
         if len(missing):
-            LOG.warn("Did not find files, but going on: %s", missing)
+            LOG.warning("Did not find files, but going on: %s", missing)

         metadata = {}
         metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
@@ -199,7 +199,7 @@ class DataSourceAzureNet(sources.DataSource):
             except BrokenAzureDataSource as exc:
                 raise exc
             except util.MountFailedError:
-                LOG.warn("%s was not mountable", cdev)
+                LOG.warning("%s was not mountable", cdev)
                 continue

             (md, self.userdata_raw, cfg, files) = ret
@@ -331,8 +331,8 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
                             log_pre="Azure ephemeral disk: ")
     if missing:
-        LOG.warn("ephemeral device '%s' did not appear after %d seconds.",
-                 devpath, maxwait)
+        LOG.warning("ephemeral device '%s' did not appear after %d seconds.",
+                    devpath, maxwait)
         return

     result = False
@@ -342,7 +342,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
     else:
         result, msg = can_dev_be_reformatted(devpath)

-    LOG.debug("reformattable=%s: %s" % (result, msg))
+    LOG.debug("reformattable=%s: %s", result, msg)
     if not result:
         return
@@ -355,7 +355,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
             LOG.debug(bmsg + " removed.")
         except Exception as e:
             # python3 throws FileNotFoundError, python2 throws OSError
-            LOG.warn(bmsg + ": remove failed! (%s)" % e)
+            LOG.warning(bmsg + ": remove failed! (%s)", e)
     else:
         LOG.debug(bmsg + " did not exist.")
     return
@@ -405,7 +405,7 @@ def pubkeys_from_crt_files(flist):
             errors.append(fname)

     if errors:
-        LOG.warn("failed to convert the crt files to pubkey: %s", errors)
+        LOG.warning("failed to convert the crt files to pubkey: %s", errors)

     return pubkeys
@@ -427,8 +427,8 @@ def wait_for_files(flist, maxwait=60, naplen=.5, log_pre=""):
         time.sleep(naplen)
         waited += naplen

-    LOG.warn("%sStill missing files after %s seconds: %s",
-             log_pre, maxwait, need)
+    LOG.warning("%sStill missing files after %s seconds: %s",
+                log_pre, maxwait, need)
     return need
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index ffc23e3d..19df16b1 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -43,7 +43,7 @@ class DataSourceCloudSigma(sources.DataSource):
             LOG.debug("detected hypervisor as %s", sys_product_name)
             return 'cloudsigma' in sys_product_name.lower()

-        LOG.warn("failed to query dmi data for system product name")
+        LOG.warning("failed to query dmi data for system product name")
         return False

     def get_data(self):
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 46dd89e0..ef374f3f 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -127,7 +127,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
             try:
                 self.vendordata_raw = sources.convert_vendordata(vd)
             except ValueError as e:
-                LOG.warn("Invalid content in vendor-data: %s", e)
+                LOG.warning("Invalid content in vendor-data: %s", e)
                 self.vendordata_raw = None

         # network_config is an /etc/network/interfaces formated file and is
@@ -190,7 +190,7 @@ def on_first_boot(data, distro=None, network=True):
     if network:
         net_conf = data.get("network_config", '')
         if net_conf and distro:
-            LOG.warn("Updating network interfaces from config drive")
+            LOG.warning("Updating network interfaces from config drive")
             distro.apply_network(net_conf)

     write_injected_files(data.get('files'))
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index d052c4c3..5e7e66be 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -51,7 +51,7 @@ class DataSourceDigitalOcean(sources.DataSource):
         if not is_do:
             return False

-        LOG.info("Running on digital ocean. droplet_id=%s" % droplet_id)
+        LOG.info("Running on digital ocean. droplet_id=%s", droplet_id)

         ipv4LL_nic = None
         if self.use_ip4LL:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 6f01a139..2f9c7edf 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -125,7 +125,7 @@ class DataSourceEc2(sources.DataSource):
             if len(filtered):
                 mdurls = filtered
             else:
-                LOG.warn("Empty metadata url list! using default list")
+                LOG.warning("Empty metadata url list! using default list")
                 mdurls = self.metadata_urls

             urls = []
@@ -232,7 +232,7 @@ def read_strict_mode(cfgval, default):
     try:
         return parse_strict_mode(cfgval)
     except ValueError as e:
-        LOG.warn(e)
+        LOG.warning(e)
         return default
@@ -270,7 +270,7 @@ def warn_if_necessary(cfgval, cfg):
     try:
         mode, sleep = parse_strict_mode(cfgval)
     except ValueError as e:
-        LOG.warn(e)
+        LOG.warning(e)
         return

     if mode == "false":
@@ -304,8 +304,8 @@ def identify_platform():
             if result:
                 return result
         except Exception as e:
-            LOG.warn("calling %s with %s raised exception: %s",
-                     checker, data, e)
+            LOG.warning("calling %s with %s raised exception: %s",
+                        checker, data, e)


 def _collect_platform_data():
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 637c9505..e9afda9c 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -98,7 +98,7 @@ class DataSourceGCE(sources.DataSource):
                 if not running_on_gce:
                     LOG.debug(msg, mkey)
                 else:
-                    LOG.warn(msg, mkey)
+                    LOG.warning(msg, mkey)
                 return False
             self.metadata[mkey] = value
@@ -116,7 +116,8 @@ class DataSourceGCE(sources.DataSource):
                 self.metadata['user-data'] = b64decode(
                     self.metadata['user-data'])
             else:
-                LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
+                LOG.warning('unknown user-data-encoding: %s, ignoring',
+                            encoding)

         return running_on_gce
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 41179b02..77df5a51 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -71,7 +71,7 @@ class DataSourceMAAS(sources.DataSource):
             except MAASSeedDirNone:
                 pass
             except MAASSeedDirMalformed as exc:
-                LOG.warn("%s was malformed: %s" % (self.seed_dir, exc))
+                LOG.warning("%s was malformed: %s", self.seed_dir, exc)
                 raise

         # If there is no metadata_url, then we're not configured
@@ -107,7 +107,7 @@ class DataSourceMAAS(sources.DataSource):
             try:
                 self.vendordata_raw = sources.convert_vendordata(vd)
             except ValueError as e:
-                LOG.warn("Invalid content in vendor-data: %s", e)
+                LOG.warning("Invalid content in vendor-data: %s", e)
                 self.vendordata_raw = None

     def wait_for_metadata_service(self, url):
@@ -126,7 +126,7 @@ class DataSourceMAAS(sources.DataSource):
             if timeout in mcfg:
                 timeout = int(mcfg.get("timeout", timeout))
         except Exception:
-            LOG.warn("Failed to get timeout, using %s" % timeout)
+            LOG.warning("Failed to get timeout, using %s", timeout)

         starttime = time.time()
         if url.endswith("/"):
@@ -190,8 +190,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
                 else:
                     md[path] = util.decode_binary(resp.contents)
             else:
-                LOG.warn(("Fetching from %s resulted in"
-                          " an invalid http code %s"), url, resp.code)
+                LOG.warning(("Fetching from %s resulted in"
+                             " an invalid http code %s"), url, resp.code)
     except url_helper.UrlError as e:
         if e.code == 404 and not optional:
             raise MAASSeedDirMalformed(
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 5924b828..c68f6b8c 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -104,8 +104,8 @@ class DataSourceNoCloud(sources.DataSource):
                                                 pp2d_kwargs)
                 except ValueError as e:
                     if dev in label_list:
-                        LOG.warn("device %s with label=%s not a"
-                                 "valid seed.", dev, label)
+                        LOG.warning("device %s with label=%s not a"
+                                    "valid seed.", dev, label)
                     continue

                 mydata = _merge_new_seed(mydata, seeded)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index d70784ac..f20c9a65 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -225,12 +225,12 @@ def get_max_wait_from_cfg(cfg):
     try:
         max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
     except ValueError:
-        LOG.warn("Failed to get '%s', using %s",
-                 max_wait_cfg_option, default_max_wait)
+        LOG.warning("Failed to get '%s', using %s",
+                    max_wait_cfg_option, default_max_wait)

     if max_wait <= 0:
-        LOG.warn("Invalid value '%s' for '%s', using '%s' instead",
-                 max_wait, max_wait_cfg_option, default_max_wait)
+        LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
+                    max_wait, max_wait_cfg_option, default_max_wait)
         max_wait = default_max_wait

     return max_wait
@@ -355,7 +355,7 @@ def transport_iso9660(require_iso=True):
         try:
             (fname, contents) = util.mount_cb(fullp, get_ovf_env, mtype=mtype)
         except util.MountFailedError:
-            LOG.debug("%s not mountable as iso9660" % fullp)
+            LOG.debug("%s not mountable as iso9660", fullp)
             continue

         if contents is not False:
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index cd75e6ea..5fdac192 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -64,7 +64,7 @@ class DataSourceOpenNebula(sources.DataSource):
             except BrokenContextDiskDir as exc:
                 raise exc
             except util.MountFailedError:
-                LOG.warn("%s was not mountable" % cdev)
+                LOG.warning("%s was not mountable", cdev)

         if results:
             seed = cdev
@@ -381,7 +381,7 @@ def read_context_disk_dir(source_dir, asuser=None):
         try:
             results['userdata'] = util.b64d(results['userdata'])
         except TypeError:
-            LOG.warn("Failed base64 decoding of userdata")
+            LOG.warning("Failed base64 decoding of userdata")

     # generate static /etc/network/interfaces
     # only if there are any required context variables
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index e1ea21f8..f0a6bfce 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -73,7 +73,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         if len(filtered):
             urls = filtered
         else:
-            LOG.warn("Empty metadata url list! using default list")
+            LOG.warning("Empty metadata url list! using default list")
            urls = [DEF_MD_URL]

         md_urls = []
@@ -137,7 +137,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
         try:
             self.vendordata_raw = sources.convert_vendordata(vd)
         except ValueError as e:
-            LOG.warn("Invalid content in vendor-data: %s", e)
+            LOG.warning("Invalid content in vendor-data: %s", e)
             self.vendordata_raw = None

         return True
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 5e668947..6c6902fd 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -555,7 +555,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
                 val = base64.b64decode(val.encode()).decode()
             # Bogus input produces different errors in Python 2 and 3
             except (TypeError, binascii.Error):
-                LOG.warn("Failed base64 decoding key '%s': %s", key, val)
+                LOG.warning("Failed base64 decoding key '%s': %s", key, val)

             if strip:
                 val = val.strip()
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 5c99437e..c3ce36d6 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -237,8 +237,8 @@ class DataSource(object):
             if candidate in valid:
                 return candidate
             else:
-                LOG.warn("invalid dsmode '%s', using default=%s",
-                         candidate, default)
+                LOG.warning("invalid dsmode '%s', using default=%s",
+                            candidate, default)
                 return default

         return default
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index f32dac9a..6e01aa47 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -289,7 +289,7 @@ class WALinuxAgentShim(object):
             LOG.debug("Unable to find endpoint in dhclient logs. "
                       " Falling back to check lease files")
             if fallback_lease_file is None:
-                LOG.warn("No fallback lease file was specified.")
+                LOG.warning("No fallback lease file was specified.")
                 value = None
             else:
                 LOG.debug("Looking for endpoint in lease file %s",
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 14293f3c..602af078 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -43,9 +43,9 @@ class ConfigFile(ConfigSource, dict):
         # "sensitive" settings shall not be logged
         if canLog:
-            logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
+            logger.debug("ADDED KEY-VAL :: '%s' = '%s'", key, val)
         else:
-            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
+            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'", key)

         self[key] = val
@@ -60,7 +60,7 @@ class ConfigFile(ConfigSource, dict):
         Keyword arguments:
         filename - The full path to the config file.
         """
-        logger.info('Parsing the config file %s.' % filename)
+        logger.info('Parsing the config file %s.', filename)

         config = configparser.ConfigParser()
         config.optionxform = str
@@ -69,7 +69,7 @@ class ConfigFile(ConfigSource, dict):
         self.clear()

         for category in config.sections():
-            logger.debug("FOUND CATEGORY = '%s'" % category)
+            logger.debug("FOUND CATEGORY = '%s'", category)

             for (key, value) in config.items(category):
                 self._insertKey(category + '|' + key, value)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 12165433..f7191b09 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -163,8 +163,8 @@ class Init(object):
             except OSError as e:
                 error = e

-        LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
-                 log_file, ','.join(perms), error)
+        LOG.warning("Failed changing perms on '%s'. tried: %s. %s",
+                    log_file, ','.join(perms), error)

     def read_cfg(self, extra_fns=None):
         # None check so that we don't keep on re-loading if empty
@@ -447,9 +447,9 @@ class Init(object):
             mod_locs, looked_locs = importer.find_module(
                 mod_name, [''], ['list_types', 'handle_part'])
             if not mod_locs:
-                LOG.warn("Could not find a valid user-data handler"
-                         " named %s in file %s (searched %s)",
-                         mod_name, fname, looked_locs)
+                LOG.warning("Could not find a valid user-data handler"
+                            " named %s in file %s (searched %s)",
+                            mod_name, fname, looked_locs)
                 continue
             mod = importer.import_module(mod_locs[0])
             mod = handlers.fixup_handler(mod)
@@ -568,7 +568,8 @@ class Init(object):
         if not isinstance(vdcfg, dict):
             vdcfg = {'enabled': False}
-            LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg)
+            LOG.warning("invalid 'vendor_data' setting. resetting to: %s",
+                        vdcfg)

         enabled = vdcfg.get('enabled')
         no_handlers = vdcfg.get('disabled_handlers', None)
@@ -632,10 +633,10 @@ class Init(object):
             return
         try:
-            LOG.debug("applying net config names for %s" % netcfg)
+            LOG.debug("applying net config names for %s", netcfg)
             self.distro.apply_network_config_names(netcfg)
         except Exception as e:
-            LOG.warn("Failed to rename devices: %s", e)
+            LOG.warning("Failed to rename devices: %s", e)

         if (self.datasource is not NULL_DATA_SOURCE and
                 not self.is_new_instance()):
@@ -651,9 +652,9 @@ class Init(object):
                          "likely broken: %s", e)
                 return
         except NotImplementedError:
-            LOG.warn("distro '%s' does not implement apply_network_config. "
-                     "networking may not be configured properly.",
-                     self.distro)
+            LOG.warning("distro '%s' does not implement apply_network_config. "
+                        "networking may not be configured properly.",
+                        self.distro)
             return
@@ -737,15 +738,15 @@ class Modules(object):
             if not mod_name:
                 continue
             if freq and freq not in FREQUENCIES:
-                LOG.warn(("Config specified module %s"
-                          " has an unknown frequency %s"), raw_name, freq)
+                LOG.warning(("Config specified module %s"
+                             " has an unknown frequency %s"), raw_name, freq)
                 # Reset it so when ran it will get set to a known value
                 freq = None
             mod_locs, looked_locs = importer.find_module(
                 mod_name, ['', type_utils.obj_name(config)], ['handle'])
             if not mod_locs:
-                LOG.warn("Could not find module named %s (searched %s)",
-                         mod_name, looked_locs)
+                LOG.warning("Could not find module named %s (searched %s)",
+                            mod_name, looked_locs)
                 continue
             mod = config.fixup_module(importer.import_module(mod_locs[0]))
             mostly_mods.append([mod, raw_name, freq, run_args])
@@ -877,7 +878,7 @@ def _pkl_load(fname):
         pickle_contents = util.load_file(fname, decode=False)
     except Exception as e:
         if os.path.isfile(fname):
-            LOG.warn("failed loading pickle in %s: %s" % (fname, e))
+            LOG.warning("failed loading pickle in %s: %s", fname, e)
         pass

     # This is allowed so just return nothing successfully loaded...
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 648cd218..b3ea64e4 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -103,14 +103,14 @@ def detect_template(text):
         raise ValueError("Unknown template rendering type '%s' requested"
                          % template_type)
     if template_type == 'jinja' and not JINJA_AVAILABLE:
-        LOG.warn("Jinja not available as the selected renderer for"
-                 " desired template, reverting to the basic renderer.")
+        LOG.warning("Jinja not available as the selected renderer for"
+                    " desired template, reverting to the basic renderer.")
         return ('basic', basic_render, rest)
     elif template_type == 'jinja' and JINJA_AVAILABLE:
         return ('jinja', jinja_render, rest)
     if template_type == 'cheetah' and not CHEETAH_AVAILABLE:
-        LOG.warn("Cheetah not available as the selected renderer for"
-                 " desired template, reverting to the basic renderer.")
+        LOG.warning("Cheetah not available as the selected renderer for"
+                    " desired template, reverting to the basic renderer.")
         return ('basic', basic_render, rest)
     elif template_type == 'cheetah' and CHEETAH_AVAILABLE:
         return ('cheetah', cheetah_render, rest)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 2f6a158e..d2b92e6a 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -155,8 +155,8 @@ def _get_ssl_args(url, ssl_details):
     scheme = urlparse(url).scheme
     if scheme == 'https' and ssl_details:
         if not SSL_ENABLED:
-            LOG.warn("SSL is not supported in requests v%s, "
-                     "cert. verification can not occur!", _REQ_VER)
+            LOG.warning("SSL is not supported in requests v%s, "
+                        "cert. verification can not occur!", _REQ_VER)
         else:
             if 'ca_certs' in ssl_details and ssl_details['ca_certs']:
                 ssl_args['verify'] = ssl_details['ca_certs']
@@ -415,14 +415,15 @@ class OauthUrlHelper(object):
             return

         if 'date' not in exception.headers:
-            LOG.warn("Missing header 'date' in %s response", exception.code)
+            LOG.warning("Missing header 'date' in %s response",
+                        exception.code)
             return

         date = exception.headers['date']
         try:
             remote_time = time.mktime(parsedate(date))
         except Exception as e:
-            LOG.warn("Failed to convert datetime '%s': %s", date, e)
+            LOG.warning("Failed to convert datetime '%s': %s", date, e)
             return

         skew = int(remote_time - time.time())
@@ -430,7 +431,7 @@ class OauthUrlHelper(object):
         old_skew = self.skew_data.get(host, 0)
         if abs(old_skew - skew) > self.skew_change_limit:
             self.update_skew_file(host, skew)
-            LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
+            LOG.warning("Setting oauth clockskew for %s to %d", host, skew)
         self.skew_data[host] = skew

         return
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index cfe5aa2f..88cb7f84 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -109,8 +109,9 @@ class UserDataProcessor(object):
                     ctype_orig = None
                     was_compressed = True
                 except util.DecompressionError as e:
-                    LOG.warn("Failed decompressing payload from %s of length"
-                             " %s due to: %s", ctype_orig, len(payload), e)
+                    LOG.warning("Failed decompressing payload from %s of"
+                                " length %s due to: %s",
+                                ctype_orig, len(payload), e)
                     continue

             # Attempt to figure out the payloads content-type
@@ -228,9 +229,9 @@ class UserDataProcessor(object):
                 if resp.ok():
                     content = resp.contents
                 else:
-                    LOG.warn(("Fetching from %s resulted in"
-                              " a invalid http code of %s"),
-                             include_url, resp.code)
+                    LOG.warning(("Fetching from %s resulted in"
+                                 " a invalid http code of %s"),
+                                include_url, resp.code)

             if content is not None:
                 new_msg = convert_string(content)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 6940850c..bfddca67 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -96,11 +96,11 @@ def _lsb_release(target=None):
                 data[fmap[fname]] = val.strip()
         missing = [k for k in fmap.values() if k not in data]
         if len(missing):
-            LOG.warn("Missing fields in lsb_release --all output: %s",
-                     ','.join(missing))
+            LOG.warning("Missing fields in lsb_release --all output: %s",
+                        ','.join(missing))

     except ProcessExecutionError as err:
-        LOG.warn("Unable to get lsb_release --all: %s", err)
+        LOG.warning("Unable to get lsb_release --all: %s", err)
         data = dict((v, "UNAVAILABLE") for v in fmap.values())

     return data
@@ -590,7 +590,7 @@ def system_info():
         'release': platform.release(),
         'python': platform.python_version(),
         'uname': platform.uname(),
-        'dist': platform.linux_distribution(),
+        'dist': platform.linux_distribution(),  # pylint: disable=W1505
     }
@@ -865,7 +865,7 @@ def read_file_or_url(url, timeout=5, retries=10,
         url = "file://%s" % url
     if url.lower().startswith("file://"):
         if data:
-            LOG.warn("Unable to post data to file resource %s", url)
+            LOG.warning("Unable to post data to file resource %s", url)
         file_path = url[len("file://"):]
         try:
             contents = load_file(file_path, decode=False)
@@ -1279,7 +1279,7 @@ def get_cmdline():
             # replace nulls with space and drop trailing null
             cmdline = contents.replace("\x00", " ")[:-1]
         except Exception as e:
-            LOG.warn("failed reading /proc/1/cmdline: %s", e)
+            LOG.warning("failed reading /proc/1/cmdline: %s", e)
             cmdline = ""
     else:
         try:
@@ -1400,7 +1400,7 @@ def logexc(log, msg, *args):
     # or even desirable to have that much junk
     # coming out to a non-debug stream
     if msg:
-        log.warn(msg, *args)
+        log.warning(msg, *args)
     # Debug gets the full trace.  However, nose has a bug whereby its
     # logcapture plugin doesn't properly handle the case where there is no
     # actual exception.  To avoid tracebacks during the test suite then, we'll
@@ -2344,8 +2344,8 @@ def read_dmi_data(key):
     if dmidecode_path:
         return _call_dmidecode(key, dmidecode_path)

-    LOG.warn("did not find either path %s or dmidecode command",
-             DMI_SYS_PATH)
+    LOG.warning("did not find either path %s or dmidecode command",
+                DMI_SYS_PATH)
     return None
diff --git a/cloudinit/warnings.py b/cloudinit/warnings.py
index 3206d4e9..f9f7a63c 100644
--- a/cloudinit/warnings.py
+++ b/cloudinit/warnings.py
@@ -130,10 +130,10 @@ def show_warning(name, cfg=None, sleep=None, mode=True, **kwargs):
         os.path.join(_get_warn_dir(cfg), name),
         topline + "\n".join(fmtlines) + "\n" + topline)

-    LOG.warn(topline + "\n".join(fmtlines) + "\n" + closeline)
+    LOG.warning(topline + "\n".join(fmtlines) + "\n" + closeline)
     if sleep:
-        LOG.debug("sleeping %d seconds for warning '%s'" % (sleep, name))
+        LOG.debug("sleeping %d seconds for warning '%s'", sleep, name)
         time.sleep(sleep)

 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/__main__.py b/tests/cloud_tests/__main__.py
index ef7d1878..ed654ad3 100644
--- a/tests/cloud_tests/__main__.py
+++ b/tests/cloud_tests/__main__.py
@@ -38,7 +38,7 @@ def run(args):
     finally:
         # TODO: make this configurable via environ or cmdline
         if failed:
-            LOG.warn('some tests failed, leaving data in %s', args.data_dir)
+            LOG.warning('some tests failed, leaving data in %s', args.data_dir)
         else:
             shutil.rmtree(args.data_dir)
     return failed
diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py
index b68cc98e..371b0444 100644
--- a/tests/cloud_tests/args.py
+++ b/tests/cloud_tests/args.py
@@ -94,7 +94,7 @@ def normalize_create_args(args):
     if os.path.exists(config.name_to_path(args.name)):
         msg = 'test: {} already exists'.format(args.name)
         if args.force:
-            LOG.warn('%s but ignoring due to --force', msg)
+            LOG.warning('%s but ignoring due to --force', msg)
         else:
             LOG.error(msg)
             return None
diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py
index 68b47d7a..02fc0e52 100644
--- a/tests/cloud_tests/collect.py
+++ b/tests/cloud_tests/collect.py
@@ -45,7 +45,7 @@ def collect_test_data(args, snapshot, os_name, test_name):

     # if test is not enabled, skip and return 0 failures
     if not test_config.get('enabled', False):
-        LOG.warn('test config %s is not enabled, skipping', test_name)
+        LOG.warning('test config %s is not enabled, skipping', test_name)
         return ({}, 0)

     # create test instance
diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py
index ef7d4e21..2a63550e 100644
--- a/tests/cloud_tests/verify.py
+++ b/tests/cloud_tests/verify.py
@@ -45,9 +45,9 @@ def verify_data(base_dir, tests):
         }

         for failure in res[test_name]['failures']:
-            LOG.warn('test case: %s failed %s.%s with: %s',
-                     test_name, failure['class'], failure['function'],
-                     failure['error'])
+            LOG.warning('test case: %s failed %s.%s with: %s',
+                        test_name, failure['class'], failure['function'],
+                        failure['error'])

     return res
@@ -80,7 +80,8 @@ def verify(args):
         if len(fail_list) == 0:
             LOG.info('test: %s passed all tests', test_name)
         else:
-            LOG.warn('test: %s failed %s tests', test_name, len(fail_list))
+            LOG.warning('test: %s failed %s tests', test_name,
+                        len(fail_list))
             failed += len(fail_list)

     # dump results
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index 95fc4659..82816e8a 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -293,9 +293,9 @@ class MetaDataHandler(object):
             else:
                 return "%s" % (PLACEMENT_CAPABILITIES.get(pentry, ''))
         else:
-            log.warn(("Did not implement action %s, "
-                      "returning empty response: %r"),
-                     action, NOT_IMPL_RESPONSE)
+            log.warning(("Did not implement action %s, "
+                         "returning empty response: %r"),
+                        action, NOT_IMPL_RESPONSE)
             return NOT_IMPL_RESPONSE