diff options
54 files changed, 294 insertions, 262 deletions
| @@ -1,13 +1,13 @@  CWD=$(shell pwd)  PY_FILES=$(shell find cloudinit bin tests tools -name "*.py" -type f )  PY_FILES+="bin/cloud-init" -noseopts ?= -v  YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f )  YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )  CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version)  CODE_VERSION=$(shell python -c "from cloudinit import version; print version.version_string()") +noseopts ?= -vv --nologcapture  PIP_INSTALL := pip install @@ -17,13 +17,20 @@ endif  all: check -check: test check_version pyflakes +check: check_version pep8 pyflakes pyflakes3 test  pep8: -	@$(CWD)/tools/run-pep8 $(PY_FILES) +	@$(CWD)/tools/run-pep8  pyflakes: -	@pyflakes $(PY_FILES) +	@$(CWD)/tools/run-pyflakes + +pyflakes3: +	@$(CWD)/tools/run-pyflakes3 +	 +unittest: clean_pyc +	nosetests $(noseopts) tests/unittests +	nosetests3 $(noseopts) tests/unittests  pip-requirements:  	@echo "Installing cloud-init dependencies..." @@ -33,8 +40,7 @@ pip-test-requirements:  	@echo "Installing cloud-init test dependencies..."  	
$(PIP_INSTALL) -r "$@.txt" -q -test: clean_pyc -	@n=$$(which nosetests3) || n=nosetests; set -- $$n $(noseopts) tests/; echo "Running $$*"; "$$@" +test: unittest  check_version:  	@if [ "$(CHANGELOG_VERSION)" != "$(CODE_VERSION)" ]; then \ @@ -60,5 +66,5 @@ rpm:  deb:  	./packages/bddeb -.PHONY: test pyflakes 2to3 clean pep8 rpm deb yaml check_version -.PHONY: pip-test-requirements pip-requirements clean_pyc +.PHONY: test pyflakes pyflakes3 2to3 clean pep8 rpm deb yaml check_version +.PHONY: pip-test-requirements pip-requirements clean_pyc unittest diff --git a/bin/cloud-init b/bin/cloud-init index 9b90c45e..7f665e7e 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -194,7 +194,7 @@ def main_init(name, args):      if args.debug:          # Reset so that all the debug handlers are closed out          LOG.debug(("Logging being reset, this logger may no" -                    " longer be active shortly")) +                   " longer be active shortly"))          logging.resetLogging()      logging.setupLogging(init.cfg)      apply_reporting_cfg(init.cfg) @@ -276,9 +276,9 @@ def main_init(name, args):          # This may run user-data handlers and/or perform          # url downloads and such as needed.          
(ran, _results) = init.cloudify().run('consume_data', -                                             init.consume_data, -                                             args=[PER_INSTANCE], -                                             freq=PER_INSTANCE) +                                              init.consume_data, +                                              args=[PER_INSTANCE], +                                              freq=PER_INSTANCE)          if not ran:              # Just consume anything that is set to run per-always              # if nothing ran in the per-instance code @@ -349,7 +349,7 @@ def main_modules(action_name, args):      if args.debug:          # Reset so that all the debug handlers are closed out          LOG.debug(("Logging being reset, this logger may no" -                    " longer be active shortly")) +                   " longer be active shortly"))          logging.resetLogging()      logging.setupLogging(mods.cfg)      apply_reporting_cfg(init.cfg) @@ -534,7 +534,8 @@ def status_wrapper(name, args, data_d=None, link_d=None):                  errors.extend(v1[m].get('errors', []))          atomic_write_json(result_path, -            {'v1': {'datasource': v1['datasource'], 'errors': errors}}) +                          {'v1': {'datasource': v1['datasource'], +                                  'errors': errors}})          util.sym_link(os.path.relpath(result_path, link_d), result_link,                        force=True) @@ -578,13 +579,13 @@ def main():      # These settings are used for the 'config' and 'final' stages      parser_mod = subparsers.add_parser('modules', -                                      help=('activates modules ' -                                            'using a given configuration key')) +                                       help=('activates modules using ' +                                             'a given configuration key'))      parser_mod.add_argument("--mode", '-m', action='store', -            
                 help=("module configuration name " -                                    "to use (default: %(default)s)"), -                             default='config', -                             choices=('init', 'config', 'final')) +                            help=("module configuration name " +                                  "to use (default: %(default)s)"), +                            default='config', +                            choices=('init', 'config', 'final'))      parser_mod.set_defaults(action=('modules', main_modules))      # These settings are used when you want to query information @@ -600,22 +601,22 @@ def main():      # This subcommand allows you to run a single module      parser_single = subparsers.add_parser('single', -                                         help=('run a single module ')) +                                          help=('run a single module '))      parser_single.set_defaults(action=('single', main_single))      parser_single.add_argument("--name", '-n', action="store", -                              help="module name to run", -                              required=True) +                               help="module name to run", +                               required=True)      parser_single.add_argument("--frequency", action="store", -                              help=("frequency of the module"), -                              required=False, -                              choices=list(FREQ_SHORT_NAMES.keys())) +                               help=("frequency of the module"), +                               required=False, +                               choices=list(FREQ_SHORT_NAMES.keys()))      parser_single.add_argument("--report", action="store_true",                                 help="enable reporting",                                 required=False)      parser_single.add_argument("module_args", nargs="*", -                              metavar='argument', -                              help=('any 
additional arguments to' -                                    ' pass to this module')) +                               metavar='argument', +                               help=('any additional arguments to' +                                     ' pass to this module'))      parser_single.set_defaults(action=('single', main_single))      args = parser.parse_args() diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 9e9e9e26..702977cb 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -91,7 +91,8 @@ def handle(name, cfg, cloud, log, _args):          if matchcfg:              matcher = re.compile(matchcfg).search          else: -            matcher = lambda f: False +            def matcher(x): +                return False          errors = add_sources(cfg['apt_sources'], params,                               aa_repo_match=matcher) @@ -173,7 +174,8 @@ def add_sources(srclist, template_params=None, aa_repo_match=None):          template_params = {}      if aa_repo_match is None: -        aa_repo_match = lambda f: False +        def aa_repo_match(x): +            return False      errorlist = []      for ent in srclist: diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index d5b0d1d7..0ecc2e4c 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -167,11 +167,12 @@ def enumerate_disk(device, nodeps=False):      parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]      for part in parts: -        d = {'name': None, -             'type': None, -             'fstype': None, -             'label': None, -            } +        d = { +            'name': None, +            'type': None, +            'fstype': None, +            'label': None, +        }          for key, value in value_splitter(part):              d[key.lower()] = value @@ -701,11 +702,12 @@ def lookup_force_flag(fs):      """      A 
force flag might be -F or -F, this look it up      """ -    flags = {'ext': '-F', -             'btrfs': '-f', -             'xfs': '-f', -             'reiserfs': '-f', -            } +    flags = { +        'ext': '-F', +        'btrfs': '-f', +        'xfs': '-f', +        'reiserfs': '-f', +    }      if 'ext' in fs.lower():          fs = 'ext' @@ -824,10 +826,11 @@ def mkfs(fs_cfg):      # Create the commands      if fs_cmd: -        fs_cmd = fs_cfg['cmd'] % {'label': label, -                                  'filesystem': fs_type, -                                  'device': device, -                                 } +        fs_cmd = fs_cfg['cmd'] % { +            'label': label, +            'filesystem': fs_type, +            'device': device, +        }      else:          # Find the mkfs command          mkfs_cmd = util.which("mkfs.%s" % fs_type) diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index 456597af..acd3e60a 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -38,11 +38,11 @@ def handle(name, cfg, _cloud, log, _args):      idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)      idevs_empty = util.get_cfg_option_str(mycfg, -        "grub-pc/install_devices_empty", None) +                                          "grub-pc/install_devices_empty", +                                          None)      if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or -            (os.path.exists("/dev/xvda1") -            and not os.path.exists("/dev/xvda"))): +       (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):          if idevs is None:              idevs = ""          if idevs_empty is None: @@ -66,7 +66,7 @@ def handle(name, cfg, _cloud, log, _args):                   (idevs, idevs_empty))      log.debug("Setting grub debconf-set-selections with '%s','%s'" % -        (idevs, idevs_empty)) +              (idevs, idevs_empty))    
  try:          util.subp(['debconf-set-selections'], dconf_sel) diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index f1c1adff..aa844ee9 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -48,7 +48,7 @@ def handle(name, cfg, cloud, log, _args):                                              "ssh_fp_console_blacklist", [])      key_blacklist = util.get_cfg_option_list(cfg,                                               "ssh_key_console_blacklist", -                                              ["ssh-dss"]) +                                             ["ssh-dss"])      try:          cmd = [helper_path] diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 11089d8d..4fe3ee21 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -204,12 +204,12 @@ def setup_swapfile(fname, size=None, maxsize=None):      try:          util.ensure_dir(tdir)          util.log_time(LOG.debug, msg, func=util.subp, -            args=[['sh', '-c', -                   ('rm -f "$1" && umask 0066 && ' -                    '{ fallocate -l "${2}M" "$1" || ' -                    '  dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' -                    'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), -                   'setup_swap', fname, mbsize]]) +                      args=[['sh', '-c', +                            ('rm -f "$1" && umask 0066 && ' +                             '{ fallocate -l "${2}M" "$1" || ' +                             ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' +                             'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), +                             'setup_swap', fname, mbsize]])      except Exception as e:          raise IOError("Failed %s: %s" % (msg, e)) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 7d9567e3..cc3f7f70 100644 --- 
a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -105,7 +105,7 @@ def handle(_name, cfg, _cloud, log, _args):      log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) -    util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,  +    util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,                   condition, execmd, [args, devnull_fp]) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 4501598e..774d3322 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -36,8 +36,8 @@ def _autostart_puppet(log):      # Set puppet to automatically start      if os.path.exists('/etc/default/puppet'):          util.subp(['sed', '-i', -                  '-e', 's/^START=.*/START=yes/', -                  '/etc/default/puppet'], capture=False) +                   '-e', 's/^START=.*/START=yes/', +                   '/etc/default/puppet'], capture=False)      elif os.path.exists('/bin/systemctl'):          util.subp(['/bin/systemctl', 'enable', 'puppet.service'],                    capture=False) @@ -65,7 +65,7 @@ def handle(name, cfg, cloud, log, _args):                    " doing nothing."))      elif install:          log.debug(("Attempting to install puppet %s,"), -                   version if version else 'latest') +                  version if version else 'latest')          cloud.distro.install_packages(('puppet', version))      # ... 
and then update the puppet configuration diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index cbc07853..2a2a9f59 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -166,7 +166,7 @@ def handle(name, cfg, _cloud, log, args):              func=do_resize, args=(resize_cmd, log))      else:          util.log_time(logfunc=log.debug, msg="Resizing", -            func=do_resize, args=(resize_cmd, log)) +                      func=do_resize, args=(resize_cmd, log))      action = 'Resized'      if resize_root == NOBLOCK: diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 3b30c47e..6f474aed 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -127,8 +127,8 @@ class SubscriptionManager(object):              return False, not_bool          if (self.servicelevel is not None) and \ -                ((not self.auto_attach) -                 or (util.is_false(str(self.auto_attach)))): +           ((not self.auto_attach) or +           (util.is_false(str(self.auto_attach)))):              no_auto = ("The service-level key must be used in conjunction "                         "with the auto-attach key.  
Please re-run with " diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 5d7f4331..f43d8d5a 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -24,7 +24,7 @@ from cloudinit import util  def handle(name, cfg, cloud, log, _args):      if util.get_cfg_option_bool(cfg, "preserve_hostname", False):          log.debug(("Configuration option 'preserve_hostname' is set," -                    " not setting the hostname in module %s"), name) +                   " not setting the hostname in module %s"), name)          return      (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 5bd2dec6..d24e43c0 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -30,9 +30,10 @@ from cloudinit import distros as ds  from cloudinit import ssh_util  from cloudinit import util -DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding," -"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " -"rather than the user \\\"root\\\".\';echo;sleep 10\"") +DISABLE_ROOT_OPTS = ( +    "no-port-forwarding,no-agent-forwarding," +    "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\"" +    " rather than the user \\\"root\\\".\';echo;sleep 10\"")  GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']  KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index d3dd1f32..15703efe 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -41,10 +41,10 @@ def handle(name, cfg, cloud, log, _args):          if not tpl_fn_name:              raise RuntimeError(("No hosts template could be"                                  " found for distro %s") % -                                (cloud.distro.osfamily)) +                               
(cloud.distro.osfamily))          templater.render_to_file(tpl_fn_name, '/etc/hosts', -                                {'hostname': hostname, 'fqdn': fqdn}) +                                 {'hostname': hostname, 'fqdn': fqdn})      elif manage_hosts == "localhost":          (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) @@ -57,4 +57,4 @@ def handle(name, cfg, cloud, log, _args):          cloud.distro.update_etc_hosts(hostname, fqdn)      else:          log.debug(("Configuration option 'manage_etc_hosts' is not set," -                    " not managing /etc/hosts in module %s"), name) +                   " not managing /etc/hosts in module %s"), name) diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index e396ba13..5b78afe1 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -29,7 +29,7 @@ frequency = PER_ALWAYS  def handle(name, cfg, cloud, log, _args):      if util.get_cfg_option_bool(cfg, "preserve_hostname", False):          log.debug(("Configuration option 'preserve_hostname' is set," -                    " not updating the hostname in module %s"), name) +                   " not updating the hostname in module %s"), name)          return      (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 3b821af9..64fba869 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -92,7 +92,7 @@ def handle(name, cfg, _cloud, log, _args):          for req_field in ['baseurl']:              if req_field not in repo_config:                  log.warn(("Repository %s does not contain a %s" -                           " configuration 'required' entry"), +                          " configuration 'required' entry"),                           repo_id, req_field)                  missing_required += 1          if not missing_required: diff --git 
a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 8167c594..4bad8708 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -211,8 +211,8 @@ class Distro(object):          # If the system hostname is different than the previous          # one or the desired one lets update it as well -        if (not sys_hostname) or (sys_hostname == prev_hostname -                                  and sys_hostname != hostname): +        if ((not sys_hostname) or (sys_hostname == prev_hostname and +           sys_hostname != hostname)):              update_files.append(sys_fn)          # If something else has changed the hostname after we set it @@ -221,7 +221,7 @@ class Distro(object):          if (sys_hostname and prev_hostname and                  sys_hostname != prev_hostname):              LOG.info("%s differs from %s, assuming user maintained hostname.", -                       prev_hostname_fn, sys_fn) +                     prev_hostname_fn, sys_fn)              return          # Remove duplicates (incase the previous config filename) @@ -289,7 +289,7 @@ class Distro(object):      def _bring_up_interface(self, device_name):          cmd = ['ifup', device_name]          LOG.debug("Attempting to run bring up interface %s using command %s", -                   device_name, cmd) +                  device_name, cmd)          try:              (_out, err) = util.subp(cmd)              if len(err): @@ -548,7 +548,7 @@ class Distro(object):              for member in members:                  if not util.is_user(member):                      LOG.warn("Unable to add group member '%s' to group '%s'" -                            "; user does not exist.", member, name) +                             "; user does not exist.", member, name)                      continue                  util.subp(['usermod', '-a', '-G', name, member]) @@ -886,7 +886,7 @@ def fetch(name):      locs, looked_locs = importer.find_module(name, ['', __name__], 
['Distro'])      if not locs:          raise ImportError("No distribution found for distro %s (searched %s)" -                           % (name, looked_locs)) +                          % (name, looked_locs))      mod = importer.import_module(locs[0])      cls = getattr(mod, 'Distro')      return cls diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 45fcf26f..93a2e008 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -74,7 +74,7 @@ class Distro(distros.Distro):                  'Interface': dev,                  'IP': info.get('bootproto'),                  'Address': "('%s/%s')" % (info.get('address'), -                        info.get('netmask')), +                                          info.get('netmask')),                  'Gateway': info.get('gateway'),                  'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '')              } @@ -86,7 +86,7 @@ class Distro(distros.Distro):          if nameservers:              util.write_file(self.resolve_conf_fn, -                    convert_resolv_conf(nameservers)) +                            convert_resolv_conf(nameservers))          return dev_names @@ -102,7 +102,7 @@ class Distro(distros.Distro):      def _bring_up_interface(self, device_name):          cmd = ['netctl', 'restart', device_name]          LOG.debug("Attempting to run bring up interface %s using command %s", -                   device_name, cmd) +                  device_name, cmd)          try:              (_out, err) = util.subp(cmd)              if len(err): diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 6d3a82bf..db5890b1 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -159,8 +159,9 @@ class Distro(distros.Distro):          # Allow the output of this to flow outwards (ie not be captured)          util.log_time(logfunc=LOG.debug, -            msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp, -            
args=(cmd,), kwargs={'env': e, 'capture': False}) +                      msg="apt-%s [%s]" % (command, ' '.join(cmd)), +                      func=util.subp, +                      args=(cmd,), kwargs={'env': e, 'capture': False})      def update_package_sources(self):          self._runner.run("update-sources", self.package_command, diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 4c484639..72012056 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -205,8 +205,8 @@ class Distro(distros.Distro):          redact_opts = ['passwd']          for key, val in kwargs.items(): -            if (key in adduser_opts and val -                    and isinstance(val, six.string_types)): +            if (key in adduser_opts and val and +               isinstance(val, six.string_types)):                  adduser_cmd.extend([adduser_opts[key], val])                  # Redact certain fields from the logs diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py index 9e80583c..6267dd6e 100644 --- a/cloudinit/distros/gentoo.py +++ b/cloudinit/distros/gentoo.py @@ -66,7 +66,7 @@ class Distro(distros.Distro):      def _bring_up_interface(self, device_name):          cmd = ['/etc/init.d/net.%s' % device_name, 'restart']          LOG.debug("Attempting to run bring up interface %s using command %s", -                   device_name, cmd) +                  device_name, cmd)          try:              (_out, err) = util.subp(cmd)              if len(err): @@ -88,7 +88,7 @@ class Distro(distros.Distro):                  (_out, err) = util.subp(cmd)                  if len(err):                      LOG.warn("Running %s resulted in stderr output: %s", cmd, -                            err) +                             err)              except util.ProcessExecutionError:                  util.logexc(LOG, "Running interface command %s failed", cmd)                  return False diff --git 
a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py index 84a1de42..efb185d4 100644 --- a/cloudinit/distros/parsers/hostname.py +++ b/cloudinit/distros/parsers/hostname.py @@ -84,5 +84,5 @@ class HostnameConf(object):              hostnames_found.add(head)          if len(hostnames_found) > 1:              raise IOError("Multiple hostnames (%s) found!" -                           % (hostnames_found)) +                          % (hostnames_found))          return entries diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 8aee03a4..2ed13d9c 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -132,7 +132,7 @@ class ResolvConf(object):              # Some hard limit on 256 chars total              raise ValueError(("Adding %r would go beyond the "                                "256 maximum search list character limit") -                              % (search_domain)) +                             % (search_domain))          self._remove_option('search')          self._contents.append(('option', ['search', s_list, '']))          return flat_sds diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index d795e12f..6157cf32 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -77,8 +77,7 @@ class SysConf(configobj.ConfigObj):          quot_func = None          if value[0] in ['"', "'"] and value[-1] in ['"', "'"]:              if len(value) == 1: -                quot_func = (lambda x: -                                self._get_single_quote(x) % x) +                quot_func = (lambda x: self._get_single_quote(x) % x)          else:              # Quote whitespace if it isn't the start + end of a shell command              if value.strip().startswith("$(") and value.strip().endswith(")"): @@ -91,10 +90,10 @@ class SysConf(configobj.ConfigObj):                 
         # to use single quotes which won't get expanded...                          if re.search(r"[\n\"']", value):                              quot_func = (lambda x: -                                            self._get_triple_quote(x) % x) +                                         self._get_triple_quote(x) % x)                          else:                              quot_func = (lambda x: -                                            self._get_single_quote(x) % x) +                                         self._get_single_quote(x) % x)                      else:                          quot_func = pipes.quote          if not quot_func: diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py index 5bebd318..baecdac9 100644 --- a/cloudinit/filters/launch_index.py +++ b/cloudinit/filters/launch_index.py @@ -61,7 +61,7 @@ class Filter(object):                      discarded += 1              LOG.debug(("Discarding %s multipart messages "                         "which do not match launch index %s"), -                       discarded, self.wanted_idx) +                      discarded, self.wanted_idx)              new_message = copy.copy(message)              new_message.set_payload(new_msgs)              new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs)) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 5e99d185..a6eb20fe 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -139,9 +139,10 @@ class FileSemaphores(object):          # but the item had run before we did canon_sem_name.          if cname != name and os.path.exists(self._get_path(name, freq)):              LOG.warn("%s has run without canonicalized name [%s].\n" -                "likely the migrator has not yet run. It will run next boot.\n" -                "run manually with: cloud-init single --name=migrator" -                % (name, cname)) +                     "likely the migrator has not yet run. 
" +                     "It will run next boot.\n" +                     "run manually with: cloud-init single --name=migrator" +                     % (name, cname))              return True          return False diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index bd80a8a6..b03ab895 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -38,7 +38,8 @@ LOG = logging.getLogger(__name__)  DS_NAME = 'Azure'  DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}  AGENT_START = ['service', 'walinuxagent', 'start'] -BOUNCE_COMMAND = ['sh', '-xc', +BOUNCE_COMMAND = [ +    'sh', '-xc',      "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]  BUILTIN_DS_CONFIG = { @@ -91,9 +92,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):      """      policy = cfg['hostname_bounce']['policy']      previous_hostname = get_hostname(hostname_command) -    if (not util.is_true(cfg.get('set_hostname')) -            or util.is_false(policy) -            or (previous_hostname == temp_hostname and policy != 'force')): +    if (not util.is_true(cfg.get('set_hostname')) or +       util.is_false(policy) or +       (previous_hostname == temp_hostname and policy != 'force')):          yield None          return      set_hostname(temp_hostname, hostname_command) @@ -123,8 +124,8 @@ class DataSourceAzureNet(sources.DataSource):          with temporary_hostname(temp_hostname, self.ds_cfg,                                  hostname_command=hostname_command) \                  as previous_hostname: -            if (previous_hostname is not None -                    and util.is_true(self.ds_cfg.get('set_hostname'))): +            if (previous_hostname is not None and +               util.is_true(self.ds_cfg.get('set_hostname'))):                  cfg = self.ds_cfg['hostname_bounce']                  try:                      perform_hostname_bounce(hostname=temp_hostname, @@ -152,7 
+153,8 @@ class DataSourceAzureNet(sources.DataSource):                  else:                      bname = str(pk['fingerprint'] + ".crt")                      fp_files += [os.path.join(ddir, bname)] -                    LOG.debug("ssh authentication: using fingerprint from fabirc") +                    LOG.debug("ssh authentication: " +                              "using fingerprint from fabirc")              missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",                                      func=wait_for_files, @@ -506,7 +508,7 @@ def read_azure_ovf(contents):          raise BrokenAzureDataSource("invalid xml: %s" % e)      results = find_child(dom.documentElement, -        lambda n: n.localName == "ProvisioningSection") +                         lambda n: n.localName == "ProvisioningSection")      if len(results) == 0:          raise NonAzureDataSource("No ProvisioningSection") @@ -516,7 +518,8 @@ def read_azure_ovf(contents):      provSection = results[0]      lpcs_nodes = find_child(provSection, -        lambda n: n.localName == "LinuxProvisioningConfigurationSet") +                            lambda n: +                            n.localName == "LinuxProvisioningConfigurationSet")      if len(results) == 0:          raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index eb474079..e3916208 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -39,7 +39,7 @@ FS_TYPES = ('vfat', 'iso9660')  LABEL_TYPES = ('config-2',)  POSSIBLE_MOUNTS = ('sr', 'cd')  OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS -                  for i in range(0, 2))) +                        for i in range(0, 2)))  class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 
0032d06c..6a897f7d 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -61,12 +61,12 @@ class DataSourceEc2(sources.DataSource):              if not self.wait_for_metadata_service():                  return False              start_time = time.time() -            self.userdata_raw = ec2.get_instance_userdata(self.api_ver, -                self.metadata_address) +            self.userdata_raw = \ +                ec2.get_instance_userdata(self.api_ver, self.metadata_address)              self.metadata = ec2.get_instance_metadata(self.api_ver,                                                        self.metadata_address)              LOG.debug("Crawl of metadata service took %s seconds", -                       int(time.time() - start_time)) +                      int(time.time() - start_time))              return True          except Exception:              util.logexc(LOG, "Failed reading from metadata address %s", @@ -132,13 +132,13 @@ class DataSourceEc2(sources.DataSource):          start_time = time.time()          url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, -                                timeout=timeout, status_cb=LOG.warn) +                                 timeout=timeout, status_cb=LOG.warn)          if url:              LOG.debug("Using metadata source: '%s'", url2base[url])          else:              LOG.critical("Giving up on md from %s after %s seconds", -                            urls, int(time.time() - start_time)) +                         urls, int(time.time() - start_time))          self.metadata_address = url2base.get(url)          return bool(url) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index cfc59ca5..f18c4cee 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -275,17 +275,18 @@ if __name__ == "__main__":          parser = argparse.ArgumentParser(description='Interact with MAAS DS')          
parser.add_argument("--config", metavar="file", -            help="specify DS config file", default=None) +                            help="specify DS config file", default=None)          parser.add_argument("--ckey", metavar="key", -            help="the consumer key to auth with", default=None) +                            help="the consumer key to auth with", default=None)          parser.add_argument("--tkey", metavar="key", -            help="the token key to auth with", default=None) +                            help="the token key to auth with", default=None)          parser.add_argument("--csec", metavar="secret", -            help="the consumer secret (likely '')", default="") +                            help="the consumer secret (likely '')", default="")          parser.add_argument("--tsec", metavar="secret", -            help="the token secret to auth with", default=None) +                            help="the token secret to auth with", default=None)          parser.add_argument("--apiver", metavar="version", -            help="the apiver to use ("" can be used)", default=MD_VERSION) +                            help="the apiver to use ("" can be used)", +                            default=MD_VERSION)          subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")          subcmds.add_parser('crawl', help="crawl the datasource") @@ -297,7 +298,7 @@ if __name__ == "__main__":          args = parser.parse_args()          creds = {'consumer_key': args.ckey, 'token_key': args.tkey, -            'token_secret': args.tsec, 'consumer_secret': args.csec} +                 'token_secret': args.tsec, 'consumer_secret': args.csec}          if args.config:              cfg = util.read_conf(args.config) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index d12601a4..be467bad 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -66,18 +66,21 @@ class 
DataSourceOVF(sources.DataSource):          system_type = util.read_dmi_data("system-product-name")          if system_type is None: -           LOG.debug("No system-product-name found") +            LOG.debug("No system-product-name found")          elif 'vmware' in system_type.lower():              LOG.debug("VMware Virtual Platform found") -            deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") +            deployPkgPluginPath = search_file("/usr/lib/vmware-tools", +                                              "libdeployPkgPlugin.so")              if deployPkgPluginPath: -                vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug, +                vmwareImcConfigFilePath = \ +                    util.log_time(logfunc=LOG.debug,                                    msg="waiting for configuration file",                                    func=wait_for_imc_cfg_file,                                    args=("/tmp", "cust.cfg"))              if vmwareImcConfigFilePath: -                LOG.debug("Found VMware DeployPkg Config File Path at %s" % vmwareImcConfigFilePath) +                LOG.debug("Found VMware DeployPkg Config File Path at %s" % +                          vmwareImcConfigFilePath)              else:                  LOG.debug("Didn't find VMware DeployPkg Config File Path") @@ -147,7 +150,7 @@ class DataSourceOVF(sources.DataSource):      def get_public_ssh_keys(self):          if 'public-keys' not in self.metadata: -           return [] +            return []          pks = self.metadata['public-keys']          if isinstance(pks, (list)):              return pks @@ -170,7 +173,7 @@ class DataSourceOVFNet(DataSourceOVF):  def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):      waited = 0 -     +      while waited < maxwait:          fileFullPath = search_file(dirpath, filename)          if fileFullPath: @@ -179,6 +182,7 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, 
naplen=5):          waited += naplen      return None +  # This will return a dict with some content  #  meta-data, user-data, some config  def read_vmware_imc(config): @@ -186,13 +190,14 @@ def read_vmware_imc(config):      cfg = {}      ud = ""      if config.host_name: -       if config.domain_name: -          md['local-hostname'] = config.host_name + "." + config.domain_name -       else: -          md['local-hostname'] = config.host_name +        if config.domain_name: +            md['local-hostname'] = config.host_name + "." + config.domain_name +        else: +            md['local-hostname'] = config.host_name      return (md, ud, cfg) +  # This will return a dict with some content  #  meta-data, user-data, some config  def read_ovf_environment(contents): @@ -328,14 +333,14 @@ def get_properties(contents):      # could also check here that elem.namespaceURI ==      #   "http://schemas.dmtf.org/ovf/environment/1"      propSections = find_child(dom.documentElement, -        lambda n: n.localName == "PropertySection") +                              lambda n: n.localName == "PropertySection")      if len(propSections) == 0:          raise XmlError("No 'PropertySection's")      props = {}      propElems = find_child(propSections[0], -                            (lambda n: n.localName == "Property")) +                           (lambda n: n.localName == "Property"))      for elem in propElems:          key = elem.attributes.getNamedItemNS(envNsURI, "key").value @@ -347,7 +352,7 @@ def get_properties(contents):  def search_file(dirpath, filename):      if not dirpath or not filename: -       return None +        return None      for root, dirs, files in os.walk(dirpath):          if filename in files: @@ -355,6 +360,7 @@ def search_file(dirpath, filename):      return None +  class XmlError(Exception):      pass diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index ac2c3b45..b26940d1 100644 --- 
a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -404,7 +404,8 @@ def read_context_disk_dir(source_dir, asuser=None):      if ssh_key_var:          lines = context.get(ssh_key_var).splitlines()          results['metadata']['public-keys'] = [l for l in lines -            if len(l) and not l.startswith("#")] +                                              if len(l) and not +                                              l.startswith("#")]      # custom hostname -- try hostname or leave cloud-init      # itself create hostname from IP address later diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 7453379a..139ee52c 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -90,8 +90,7 @@ BUILTIN_DS_CONFIG = {                           'user-data',                           'user-script',                           'sdc:datacenter_name', -                         'sdc:uuid', -                        ], +                         'sdc:uuid'],      'base64_keys': [],      'base64_all': False,      'disk_aliases': {'ephemeral0': '/dev/vdb'}, @@ -450,7 +449,7 @@ class JoyentMetadataClient(object):          response = bytearray()          response.extend(self.metasource.read(1)) -        while response[-1:] !=  b'\n': +        while response[-1:] != b'\n':              response.extend(self.metasource.read(1))          response = response.rstrip().decode('ascii')          LOG.debug('Read "%s" from metadata transport.', response) @@ -513,7 +512,7 @@ def write_boot_content(content, content_f, link=None, shebang=False,          except Exception as e:              util.logexc(LOG, ("Failed to identify script type for %s" % -                             content_f, e)) +                              content_f, e))      if link:          try: diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py 
b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 6d721134..8c5c08cf 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -46,12 +46,12 @@ class NicConfigurator:          """          primary_nics = [nic for nic in self.nics if nic.primary]          if not primary_nics: -           return None +            return None          elif len(primary_nics) > 1: -           raise Exception('There can only be one primary nic', +            raise Exception('There can only be one primary nic',                              [nic.mac for nic in primary_nics])          else: -           return primary_nics[0] +            return primary_nics[0]      def find_devices(self):          """ @@ -185,8 +185,8 @@ class NicConfigurator:          lines = []          for addr in addrs: -            lines.append('    up route -A inet6 add default gw %s metric 10000' % -                         addr.gateway) +            lines.append('    up route -A inet6 add default gw ' +                         '%s metric 10000' % addr.gateway)          return lines diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 9b2f5ed5..c74a7ae2 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -31,7 +31,8 @@ LOG = logging.getLogger(__name__)  DEF_SSHD_CFG = "/etc/ssh/sshd_config"  # taken from openssh source key.c/key_type_from_name -VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", +VALID_KEY_TYPES = ( +    "rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",      "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",      "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",      "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com", diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 9f192c8d..dbcf3d55 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -509,13 +509,13 @@ class Init(object):      def consume_data(self, frequency=PER_INSTANCE):          
# Consume the userdata first, because we need want to let the part          # handlers run first (for merging stuff) -        with events.ReportEventStack( -            "consume-user-data", "reading and applying user-data", -            parent=self.reporter): +        with events.ReportEventStack("consume-user-data", +                                     "reading and applying user-data", +                                     parent=self.reporter):                  self._consume_userdata(frequency) -        with events.ReportEventStack( -            "consume-vendor-data", "reading and applying vendor-data", -            parent=self.reporter): +        with events.ReportEventStack("consume-vendor-data", +                                     "reading and applying vendor-data", +                                     parent=self.reporter):                  self._consume_vendordata(frequency)          # Perform post-consumption adjustments so that @@ -655,7 +655,7 @@ class Modules(object):              else:                  raise TypeError(("Failed to read '%s' item in config,"                                   " unknown type %s") % -                                 (item, type_utils.obj_name(item))) +                                (item, type_utils.obj_name(item)))          return module_list      def _fixup_modules(self, raw_mods): @@ -762,8 +762,8 @@ class Modules(object):          if skipped:              LOG.info("Skipping modules %s because they are not verified " -                      "on distro '%s'.  To run anyway, add them to " -                      "'unverified_modules' in config.", skipped, d_name) +                     "on distro '%s'.  
To run anyway, add them to " +                     "'unverified_modules' in config.", skipped, d_name)          if forced:              LOG.info("running unverified_modules: %s", forced) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index f2e1390e..936f7da5 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -252,9 +252,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,              # attrs              return UrlResponse(r)          except exceptions.RequestException as e: -            if (isinstance(e, (exceptions.HTTPError)) -                    and hasattr(e, 'response')  # This appeared in v 0.10.8 -                    and hasattr(e.response, 'status_code')): +            if (isinstance(e, (exceptions.HTTPError)) and +               hasattr(e, 'response') and  # This appeared in v 0.10.8 +               hasattr(e.response, 'status_code')):                  excps.append(UrlError(e, code=e.response.status_code,                                        headers=e.response.headers,                                        url=url)) diff --git a/cloudinit/util.py b/cloudinit/util.py index 0a639bb9..e7407ea4 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -612,7 +612,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):  def make_url(scheme, host, port=None, -                path='', params='', query='', fragment=''): +             path='', params='', query='', fragment=''):      pieces = []      pieces.append(scheme or '') @@ -804,8 +804,8 @@ def load_yaml(blob, default=None, allowed=(dict,)):      blob = decode_binary(blob)      try:          LOG.debug("Attempting to load yaml from string " -                 "of length %s with allowed root types %s", -                 len(blob), allowed) +                  "of length %s with allowed root types %s", +                  len(blob), allowed)          converted = safeyaml.load(blob)          if not isinstance(converted, allowed):              # 
Yes this will just be caught, but thats ok for now... @@ -878,7 +878,7 @@ def read_conf_with_confd(cfgfile):              if not isinstance(confd, six.string_types):                  raise TypeError(("Config file %s contains 'conf_d' "                                   "with non-string type %s") % -                                 (cfgfile, type_utils.obj_name(confd))) +                                (cfgfile, type_utils.obj_name(confd)))              else:                  confd = str(confd).strip()      elif os.path.isdir("%s.d" % cfgfile): @@ -1041,7 +1041,8 @@ def is_resolvable(name):          for iname in badnames:              try:                  result = socket.getaddrinfo(iname, None, 0, 0, -                    socket.SOCK_STREAM, socket.AI_CANONNAME) +                                            socket.SOCK_STREAM, +                                            socket.AI_CANONNAME)                  badresults[iname] = []                  for (_fam, _stype, _proto, cname, sockaddr) in result:                      badresults[iname].append("%s: %s" % (cname, sockaddr[0])) @@ -1109,7 +1110,7 @@ def close_stdin():  def find_devs_with(criteria=None, oformat='device', -                    tag=None, no_cache=False, path=None): +                   tag=None, no_cache=False, path=None):      """      find devices matching given criteria (via blkid)      criteria can be *one* of: @@ -1628,7 +1629,7 @@ def write_file(filename, content, mode=0o644, omode="wb"):          content = decode_binary(content)          write_type = 'characters'      LOG.debug("Writing to %s - %s: [%s] %s %s", -               filename, omode, mode, len(content), write_type) +              filename, omode, mode, len(content), write_type)      with SeLinuxGuard(path=filename):          with open(filename, omode) as fh:              fh.write(content) diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index c603bfdb..9c1ec1d4 100644 --- a/tests/unittests/test_data.py +++ 
b/tests/unittests/test_data.py @@ -27,11 +27,12 @@ from cloudinit import stages  from cloudinit import user_data as ud  from cloudinit import util -INSTANCE_ID = "i-testing" -  from . import helpers +INSTANCE_ID = "i-testing" + +  class FakeDataSource(sources.DataSource):      def __init__(self, userdata=None, vendordata=None): diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index e9cd2fa5..85759c68 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -134,8 +134,7 @@ class TestGetCloudType(TestCase):          '''          util.read_dmi_data = _dmi_data('RHEV')          dsrc = DataSourceAltCloud({}, None, self.paths) -        self.assertEquals('RHEV', \ -            dsrc.get_cloud_type()) +        self.assertEquals('RHEV', dsrc.get_cloud_type())      def test_vsphere(self):          ''' @@ -144,8 +143,7 @@ class TestGetCloudType(TestCase):          '''          util.read_dmi_data = _dmi_data('VMware Virtual Platform')          dsrc = DataSourceAltCloud({}, None, self.paths) -        self.assertEquals('VSPHERE', \ -            dsrc.get_cloud_type()) +        self.assertEquals('VSPHERE', dsrc.get_cloud_type())      def test_unknown(self):          ''' @@ -154,8 +152,7 @@ class TestGetCloudType(TestCase):          '''          util.read_dmi_data = _dmi_data('Unrecognized Platform')          dsrc = DataSourceAltCloud({}, None, self.paths) -        self.assertEquals('UNKNOWN', \ -            dsrc.get_cloud_type()) +        self.assertEquals('UNKNOWN', dsrc.get_cloud_type())  class TestGetDataCloudInfoFile(TestCase): @@ -412,27 +409,27 @@ class TestReadUserDataCallback(TestCase):          '''Test read_user_data_callback() with both files.'''          self.assertEquals('test user data', -            read_user_data_callback(self.mount_dir)) +                          read_user_data_callback(self.mount_dir))      def test_callback_dc(self):     
     '''Test read_user_data_callback() with only DC file.'''          _remove_user_data_files(self.mount_dir, -            dc_file=False, -            non_dc_file=True) +                                dc_file=False, +                                non_dc_file=True)          self.assertEquals('test user data', -            read_user_data_callback(self.mount_dir)) +                          read_user_data_callback(self.mount_dir))      def test_callback_non_dc(self):          '''Test read_user_data_callback() with only non-DC file.'''          _remove_user_data_files(self.mount_dir, -            dc_file=True, -            non_dc_file=False) +                                dc_file=True, +                                non_dc_file=False)          self.assertEquals('test user data', -            read_user_data_callback(self.mount_dir)) +                          read_user_data_callback(self.mount_dir))      def test_callback_none(self):          '''Test read_user_data_callback() no files are found.''' diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3933794f..4c9c7d8b 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -207,7 +207,7 @@ class TestAzureDataSource(TestCase):          yaml_cfg = "{agent_command: my_command}\n"          cfg = yaml.safe_load(yaml_cfg)          odata = {'HostName': "myhost", 'UserName': "myuser", -                'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} +                 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}          data = {'ovfcontent': construct_valid_ovf_env(data=odata)}          dsrc = self._get_ds(data) @@ -219,8 +219,8 @@ class TestAzureDataSource(TestCase):          # set dscfg in via base64 encoded yaml          cfg = {'agent_command': "my_command"}          odata = {'HostName': "myhost", 'UserName': "myuser", -                'dscfg': {'text': b64e(yaml.dump(cfg)), -                    
      'encoding': 'base64'}} +                 'dscfg': {'text': b64e(yaml.dump(cfg)), +                           'encoding': 'base64'}}          data = {'ovfcontent': construct_valid_ovf_env(data=odata)}          dsrc = self._get_ds(data) @@ -267,7 +267,8 @@ class TestAzureDataSource(TestCase):          # should equal that after the '$'          pos = defuser['passwd'].rfind("$") + 1          self.assertEqual(defuser['passwd'], -            crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) +                         crypt.crypt(odata['UserPassword'], +                         defuser['passwd'][0:pos]))      def test_userdata_plain(self):          mydata = "FOOBAR" @@ -364,8 +365,8 @@ class TestAzureDataSource(TestCase):          # Make sure that user can affect disk aliases          dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}          odata = {'HostName': "myhost", 'UserName': "myuser", -                'dscfg': {'text': b64e(yaml.dump(dscfg)), -                          'encoding': 'base64'}} +                 'dscfg': {'text': b64e(yaml.dump(dscfg)), +                           'encoding': 'base64'}}          usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},                                    'ephemeral0': False}}          userdata = '#cloud-config' + yaml.dump(usercfg) + "\n" @@ -634,7 +635,7 @@ class TestReadAzureOvf(TestCase):      def test_invalid_xml_raises_non_azure_ds(self):          invalid_xml = "<foo>" + construct_valid_ovf_env(data={})          self.assertRaises(DataSourceAzure.BrokenAzureDataSource, -            DataSourceAzure.read_azure_ovf, invalid_xml) +                          DataSourceAzure.read_azure_ovf, invalid_xml)      def test_load_with_pubkeys(self):          mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 83aca505..3954ceb3 100644 --- 
a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -293,9 +293,8 @@ class TestConfigDriveDataSource(TestCase):              util.is_partition = my_is_partition              devs_with_answers = {"TYPE=vfat": [], -                "TYPE=iso9660": ["/dev/vdb"], -                "LABEL=config-2": ["/dev/vdb"], -            } +                                 "TYPE=iso9660": ["/dev/vdb"], +                                 "LABEL=config-2": ["/dev/vdb"]}              self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())              # add a vfat item @@ -306,9 +305,10 @@ class TestConfigDriveDataSource(TestCase):              # verify that partitions are considered, that have correct label.              devs_with_answers = {"TYPE=vfat": ["/dev/sda1"], -                "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]} +                                 "TYPE=iso9660": [], +                                 "LABEL=config-2": ["/dev/vdb3"]}              self.assertEqual(["/dev/vdb3"], -                              ds.find_candidate_devs()) +                             ds.find_candidate_devs())          finally:              util.find_devs_with = orig_find_devs_with @@ -319,7 +319,7 @@ class TestConfigDriveDataSource(TestCase):          populate_dir(self.tmp, CFG_DRIVE_FILES_V2)          myds = cfg_ds_from_dir(self.tmp)          self.assertEqual(myds.get_public_ssh_keys(), -           [OSTACK_META['public_keys']['mykey']]) +                         [OSTACK_META['public_keys']['mykey']])  def cfg_ds_from_dir(seed_d): diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index eb97b692..77d15cac 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -25,9 +25,9 @@ class TestMAASDataSource(TestCase):          """Verify a valid seeddir is read as such."""          data = {'instance-id': 'i-valid01', -     
       'local-hostname': 'valid01-hostname', -            'user-data': b'valid01-userdata', -            'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'} +                'local-hostname': 'valid01-hostname', +                'user-data': b'valid01-userdata', +                'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}          my_d = os.path.join(self.tmp, "valid")          populate_dir(my_d, data) @@ -45,8 +45,8 @@ class TestMAASDataSource(TestCase):          """Verify extra files do not affect seed_dir validity."""          data = {'instance-id': 'i-valid-extra', -            'local-hostname': 'valid-extra-hostname', -            'user-data': b'valid-extra-userdata', 'foo': 'bar'} +                'local-hostname': 'valid-extra-hostname', +                'user-data': b'valid-extra-userdata', 'foo': 'bar'}          my_d = os.path.join(self.tmp, "valid_extra")          populate_dir(my_d, data) @@ -64,7 +64,7 @@ class TestMAASDataSource(TestCase):          """Verify that invalid seed_dir raises MAASSeedDirMalformed."""          valid = {'instance-id': 'i-instanceid', -            'local-hostname': 'test-hostname', 'user-data': ''} +                 'local-hostname': 'test-hostname', 'user-data': ''}          my_based = os.path.join(self.tmp, "valid_extra") @@ -94,8 +94,8 @@ class TestMAASDataSource(TestCase):      def test_seed_dir_missing(self):          """Verify that missing seed_dir raises MAASSeedDirNone."""          self.assertRaises(DataSourceMAAS.MAASSeedDirNone, -            DataSourceMAAS.read_maas_seed_dir, -            os.path.join(self.tmp, "nonexistantdirectory")) +                          DataSourceMAAS.read_maas_seed_dir, +                          os.path.join(self.tmp, "nonexistantdirectory"))      def test_seed_url_valid(self):          """Verify that valid seed_url is read as such.""" diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index ccb9f080..616e9f0e 100644 --- 
a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -462,8 +462,8 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):                  payloadstr = ' {0}'.format(self.response_parts['payload'])              return ('V2 {length} {crc} {request_id} '                      '{command}{payloadstr}\n'.format( -                    payloadstr=payloadstr, -                    **self.response_parts).encode('ascii')) +                     payloadstr=payloadstr, +                     **self.response_parts).encode('ascii'))          self.metasource_data = None @@ -500,7 +500,7 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):          written_line = self.serial.write.call_args[0][0]          print(type(written_line))          self.assertEndsWith(written_line.decode('ascii'), -            b'\n'.decode('ascii')) +                            b'\n'.decode('ascii'))          self.assertEqual(1, written_line.count(b'\n'))      def _get_written_line(self, key='some_key'): diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index cd376e9c..04ce5687 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -74,7 +74,7 @@ class TestLoadPowerState(t_help.TestCase):  class TestCheckCondition(t_help.TestCase):      def cmd_with_exit(self, rc):          return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc]) -         +      def test_true_is_true(self):          self.assertEqual(psc.check_condition(True), True) @@ -94,7 +94,6 @@ class TestCheckCondition(t_help.TestCase):          self.assertEqual(mocklog.warn.call_count, 1) -  def check_lps_ret(psc_return, mode=None):      if len(psc_return) != 3:          raise TypeError("length returned = %d" % len(psc_return)) diff --git a/tests/unittests/test_handler/test_handler_seed_random.py 
b/tests/unittests/test_handler/test_handler_seed_random.py index 0bcdcb31..34d11f21 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/test_handler/test_handler_seed_random.py @@ -190,7 +190,8 @@ class TestRandomSeed(t_help.TestCase):          c = self._get_cloud('ubuntu', {})          self.whichdata = {}          self.assertRaises(ValueError, cc_seed_random.handle, -            'test', {'random_seed': {'command_required': True}}, c, LOG, []) +                          'test', {'random_seed': {'command_required': True}}, +                          c, LOG, [])      def test_seed_command_and_required(self):          c = self._get_cloud('ubuntu', {}) diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py index eceb14d9..8aeff53c 100644 --- a/tests/unittests/test_handler/test_handler_snappy.py +++ b/tests/unittests/test_handler/test_handler_snappy.py @@ -125,8 +125,7 @@ class TestInstallPackages(t_help.TestCase):               "pkg1.smoser.config": "pkg1.smoser.config-data",               "pkg1.config": "pkg1.config-data",               "pkg2.smoser_0.0_amd64.snap": "pkg2-snapdata", -             "pkg2.smoser_0.0_amd64.config": "pkg2.config", -            }) +             "pkg2.smoser_0.0_amd64.config": "pkg2.config"})          ret = get_package_ops(              packages=[], configs={}, installed=[], fspath=self.tmp) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 3b317121..9aeb1cde 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -32,7 +32,8 @@ VALID_CONTENT = {      ),  } -TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding," +TEST_OPTIONS = ( +    "no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"      'command="echo \'Please login as the user \"ubuntu\" rather than the'      'user \"root\".\';echo;sleep 10"') diff --git a/tests/unittests/test_templating.py 
b/tests/unittests/test_templating.py index 0c19a2c2..b9863650 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -114,5 +114,6 @@ $a,$b'''                                                                 codename)          out_data = templater.basic_render(in_data, -            {'mirror': mirror, 'codename': codename}) +                                          {'mirror': mirror, +                                           'codename': codename})          self.assertEqual(ex_data, out_data) diff --git a/tools/hacking.py b/tools/hacking.py index 3175df38..1a0631c2 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -47,10 +47,10 @@ def import_normalize(line):      # handle "from x import y as z" to "import x.y as z"      split_line = line.split()      if (line.startswith("from ") and "," not in line and -           split_line[2] == "import" and split_line[3] != "*" and -           split_line[1] != "__future__" and -           (len(split_line) == 4 or -           (len(split_line) == 6 and split_line[4] == "as"))): +       split_line[2] == "import" and split_line[3] != "*" and +       split_line[1] != "__future__" and +       (len(split_line) == 4 or +       (len(split_line) == 6 and split_line[4] == "as"))):          return "import %s.%s" % (split_line[1], split_line[3])      else:          return line @@ -74,7 +74,7 @@ def cloud_import_alphabetical(physical_line, line_number, lines):              split_line[0] == "import" and split_previous[0] == "import"):          if split_line[1] < split_previous[1]:              return (0, "N306: imports not in alphabetical order (%s, %s)" -                % (split_previous[1], split_line[1])) +                    % (split_previous[1], split_line[1]))  def cloud_docstring_start_space(physical_line): @@ -87,8 +87,8 @@ def cloud_docstring_start_space(physical_line):      pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])  # start      if (pos != -1 and len(physical_line) > pos + 
1):          if (physical_line[pos + 3] == ' '): -            return (pos, "N401: one line docstring should not start with" -                " a space") +            return (pos, +                    "N401: one line docstring should not start with a space")  def cloud_todo_format(physical_line): @@ -167,4 +167,4 @@ if __name__ == "__main__":      finally:          if len(_missingImport) > 0:              print >> sys.stderr, ("%i imports missing in this test environment" -                    % len(_missingImport)) +                                  % len(_missingImport)) diff --git a/tools/mock-meta.py b/tools/mock-meta.py index dfbc2a71..1c746f17 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -126,11 +126,11 @@ class WebException(Exception):  def yamlify(data):      formatted = yaml.dump(data, -        line_break="\n", -        indent=4, -        explicit_start=True, -        explicit_end=True, -        default_flow_style=False) +                          line_break="\n", +                          indent=4, +                          explicit_start=True, +                          explicit_end=True, +                          default_flow_style=False)      return formatted @@ -282,7 +282,7 @@ class MetaDataHandler(object):          else:              log.warn(("Did not implement action %s, "                        "returning empty response: %r"), -                      action, NOT_IMPL_RESPONSE) +                     action, NOT_IMPL_RESPONSE)              return NOT_IMPL_RESPONSE @@ -404,14 +404,17 @@ def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'):  def extract_opts():      parser = OptionParser()      parser.add_option("-p", "--port", dest="port", action="store", type=int, -        default=80, metavar="PORT", -        help="port from which to serve traffic (default: %default)") +                      default=80, metavar="PORT", +                      help=("port from which to serve traffic" +                            " 
(default: %default)"))      parser.add_option("-a", "--addr", dest="address", action="store", type=str, -        default='0.0.0.0', metavar="ADDRESS", -        help="address from which to serve traffic (default: %default)") +                      default='0.0.0.0', metavar="ADDRESS", +                      help=("address from which to serve traffic" +                            " (default: %default)"))      parser.add_option("-f", '--user-data-file', dest='user_data_file', -        action='store', metavar='FILE', -        help="user data filename to serve back to incoming requests") +                      action='store', metavar='FILE', +                      help=("user data filename to serve back to" +                            " incoming requests"))      (options, args) = parser.parse_args()      out = dict()      out['extra'] = args diff --git a/tools/run-pep8 b/tools/run-pep8 index ccd6be5a..086400fc 100755 --- a/tools/run-pep8 +++ b/tools/run-pep8 @@ -1,39 +1,22 @@  #!/bin/bash -if [ $# -eq 0 ]; then -   files=( bin/cloud-init $(find * -name "*.py" -type f) ) +pycheck_dirs=( "cloudinit/" "bin/" "tests/" "tools/" ) +# FIXME: cloud-init modifies sys module path, pep8 does not like +# bin_files=( "bin/cloud-init" ) +CR=" +" +[ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose="" + +set -f +if [ $# -eq 0 ]; then unset IFS +   IFS="$CR" +   files=( "${bin_files[@]}" "${pycheck_dirs[@]}" ) +   unset IFS  else -   files=( "$@" ); +   files=( "$@" )  fi -if [ -f 'hacking.py' ] -then -    base=`pwd` -else -    base=`pwd`/tools/ -fi - -IGNORE="" - -# King Arthur: Be quiet! ... Be Quiet! I Order You to Be Quiet. 
-IGNORE="$IGNORE,E121" # Continuation line indentation is not a multiple of four -IGNORE="$IGNORE,E123" # Closing bracket does not match indentation of opening bracket's line -IGNORE="$IGNORE,E124" # Closing bracket missing visual indentation -IGNORE="$IGNORE,E125" # Continuation line does not distinguish itself from next logical line -IGNORE="$IGNORE,E126" # Continuation line over-indented for hanging indent -IGNORE="$IGNORE,E127" # Continuation line over-indented for visual indent -IGNORE="$IGNORE,E128" # Continuation line under-indented for visual indent -IGNORE="$IGNORE,E502" # The backslash is redundant between brackets -IGNORE="${IGNORE#,}"  # remove the leading ',' added above - -cmd=( -    ${base}/hacking.py - -    --ignore="$IGNORE" - -    "${files[@]}" -) - -echo -e "\nRunning 'cloudinit' pep8:" -echo "${cmd[@]}" -"${cmd[@]}" +myname=${0##*/} +cmd=( "${myname#run-}" $verbose "${files[@]}" ) +echo "Running: " "${cmd[@]}" 1>&2 +exec "${cmd[@]}" diff --git a/tools/run-pyflakes b/tools/run-pyflakes new file mode 100755 index 00000000..4bea17f4 --- /dev/null +++ b/tools/run-pyflakes @@ -0,0 +1,18 @@ +#!/bin/bash + +PYTHON_VERSION=${PYTHON_VERSION:-2} +CR=" +" +pycheck_dirs=( "cloudinit/" "bin/" "tests/" "tools/" ) + +set -f +if [ $# -eq 0 ]; then +   files=( "${pycheck_dirs[@]}" ) +else +   files=( "$@" ) +fi + +cmd=( "python${PYTHON_VERSION}" -m "pyflakes" "${files[@]}" ) + +echo "Running: " "${cmd[@]}" 1>&2 +exec "${cmd[@]}" diff --git a/tools/run-pyflakes3 b/tools/run-pyflakes3 new file mode 100755 index 00000000..e9f0863d --- /dev/null +++ b/tools/run-pyflakes3 @@ -0,0 +1,2 @@ +#!/bin/sh +PYTHON_VERSION=3 exec "${0%/*}/run-pyflakes" "$@" | 
