-rw-r--r--  ChangeLog | 4
-rw-r--r--  HACKING.rst | 4
-rw-r--r--  Makefile | 5
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 2
-rw-r--r--  cloudinit/config/cc_byobu.py | 2
-rw-r--r--  cloudinit/config/cc_chef.py | 3
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 6
-rw-r--r--  cloudinit/config/cc_grub_dpkg.py | 3
-rw-r--r--  cloudinit/config/cc_mounts.py | 2
-rw-r--r--  cloudinit/config/cc_phone_home.py | 2
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 4
-rw-r--r--  cloudinit/config/cc_resizefs.py | 12
-rw-r--r--  cloudinit/config/cc_rightscale_userdata.py | 34
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 4
-rw-r--r--  cloudinit/config/cc_set_passwords.py | 2
-rw-r--r--  cloudinit/config/cc_ssh.py | 2
-rw-r--r--  cloudinit/config/cc_ssh_authkey_fingerprints.py | 5
-rw-r--r--  cloudinit/config/cc_yum_add_repo.py | 4
-rw-r--r--  cloudinit/distros/__init__.py | 4
-rw-r--r--  cloudinit/distros/parsers/resolv_conf.py | 4
-rw-r--r--  cloudinit/handlers/boot_hook.py | 3
-rw-r--r--  cloudinit/handlers/cloud_config.py | 3
-rw-r--r--  cloudinit/handlers/shell_script.py | 3
-rw-r--r--  cloudinit/handlers/upstart_job.py | 3
-rw-r--r--  cloudinit/mergers/m_list.py | 2
-rw-r--r--  cloudinit/patcher.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 4
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 2
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 4
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 2
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 32
-rw-r--r--  cloudinit/sources/__init__.py | 2
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 117
-rw-r--r--  cloudinit/stages.py | 2
-rw-r--r--  cloudinit/type_utils.py | 2
-rw-r--r--  cloudinit/url_helper.py | 17
-rw-r--r--  cloudinit/util.py | 22
-rw-r--r--  packages/debian/control.in | 1
-rw-r--r--  pylintrc | 19
-rwxr-xr-x  setup.py | 2
-rw-r--r--  test-requirements.txt | 3
-rw-r--r--  tests/unittests/test__init__.py | 3
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 2
-rw-r--r--  tests/unittests/test_datasource/test_nocloud.py | 4
-rw-r--r--  tests/unittests/test_datasource/test_openstack.py | 23
-rw-r--r--  tests/unittests/test_distros/test_generic.py | 6
-rw-r--r--  tests/unittests/test_handler/test_handler_growpart.py | 10
-rw-r--r--  tests/unittests/test_handler/test_handler_power_state.py | 2
-rw-r--r--  tests/unittests/test_handler/test_handler_yum_add_repo.py | 2
-rw-r--r--  tests/unittests/test_merging.py | 2
-rw-r--r--  tests/unittests/test_util.py | 6
-rwxr-xr-x  tools/hacking.py | 6
-rwxr-xr-x  tools/mock-meta.py | 8
-rwxr-xr-x  tools/run-pep8 | 2
-rwxr-xr-x  tools/run-pylint | 26
55 files changed, 217 insertions, 240 deletions
diff --git a/ChangeLog b/ChangeLog
index b420d037..b1d223ed 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -23,6 +23,10 @@
unless config explicitly provided (LP: #1329583) [Garrett Holmstrom])
- fix rendering resolv.conf if no 'options' are provided (LP: #1328953)
- docs: fix disk-setup to reference 'table_type' [Rail Aliiev] (LP: #1313114)
+ - ssh_authkey_fingerprints: fix bug that prevented disabling the module.
+ (LP: #1340903) [Patrick Lucas]
+ - no longer use pylint as a checker, fix pep8 [Jay Faulkner].
+ - Openstack: do not load some urls twice.
0.7.5:
- open 0.7.5
- Add a debug log message around import failures
diff --git a/HACKING.rst b/HACKING.rst
index 66bf7c90..6bfe4b4d 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -19,9 +19,9 @@ To get changes into cloud-init, the process to follow is:
- ``bzr commit``
-* Check pylint and pep8 and test, and address any issues:
+* Check pep8 and test, and address any issues:
- - ``make test pylint pep8``
+ - ``make test pep8``
* Push to launchpad to a personal branch:
diff --git a/Makefile b/Makefile
index c8b75e73..009257ca 100644
--- a/Makefile
+++ b/Makefile
@@ -19,9 +19,6 @@ all: test check_version
pep8:
@$(CWD)/tools/run-pep8 $(PY_FILES)
-pylint:
- @$(CWD)/tools/run-pylint $(PY_FILES)
-
pyflakes:
pyflakes $(PY_FILES)
@@ -61,5 +58,5 @@ rpm:
deb:
./packages/bddeb
-.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb yaml check_version
+.PHONY: test pyflakes 2to3 clean pep8 rpm deb yaml check_version
.PHONY: pip-test-requirements pip-requirements clean_pyc
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 29c13a3d..f10b76a3 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -235,7 +235,7 @@ def find_apt_mirror_info(cloud, cfg):
mirror = util.search_for_mirror(search)
if (not mirror and
- util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
+ util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)):
mydom = ""
doms = []
diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py
index 92d428b7..ef0ce7ab 100644
--- a/cloudinit/config/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -43,7 +43,7 @@ def handle(name, cfg, cloud, log, args):
valid = ("enable-user", "enable-system", "enable",
"disable-user", "disable-system", "disable")
- if not value in valid:
+ if value not in valid:
log.warn("Unknown value %s for byobu_by_default", value)
mod_user = value.endswith("-user")
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 727769cd..806deed9 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -87,7 +87,8 @@ def handle(name, cfg, cloud, log, _args):
# If chef is not installed, we install chef based on 'install_type'
if (not os.path.isfile('/usr/bin/chef-client') or
- util.get_cfg_option_bool(chef_cfg, 'force_install', default=False)):
+ util.get_cfg_option_bool(chef_cfg,
+ 'force_install', default=False)):
install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
'packages')
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 0b970e4e..1660832b 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -271,7 +271,7 @@ def find_device_node(device, fs_type=None, label=None, valid_targets=None,
return ('/dev/%s' % d['name'], False)
if (d['fstype'] == fs_type and
- ((label_match and d['label'] == label) or not label_match)):
+ ((label_match and d['label'] == label) or not label_match)):
# If we find a matching device, we return that
return ('/dev/%s' % d['name'], True)
@@ -447,7 +447,7 @@ def get_partition_mbr_layout(size, layout):
return "0,"
if ((len(layout) == 0 and isinstance(layout, list)) or
- not isinstance(layout, list)):
+ not isinstance(layout, list)):
raise Exception("Partition layout is invalid")
last_part_num = len(layout)
@@ -484,7 +484,7 @@ def get_partition_mbr_layout(size, layout):
def purge_disk_ptable(device):
# wipe the first and last megabyte of a disk (or file)
# gpt stores partition table both at front and at end.
- null = '\0' # pylint: disable=W1401
+ null = '\0'
start_len = 1024 * 1024
end_len = 1024 * 1024
with open(device, "rb+") as fp:
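The hunk above only drops a pylint pragma from purge_disk_ptable, which zeroes the first and last megabyte of a disk because GPT keeps a backup table at the end. A minimal, self-contained sketch of that idea, run against a scratch image file rather than a real device (names here are illustrative, not cloud-init's):

    import os
    import tempfile

    MB = 1024 * 1024

    def wipe_ends(path, length=MB):
        # Zero the first and last `length` bytes: the MBR/primary GPT live at
        # the start, the backup GPT lives at the very end of the device.
        null = b'\0'
        with open(path, "rb+") as fp:
            fp.write(null * length)
            fp.seek(-length, os.SEEK_END)
            fp.write(null * length)
            fp.flush()

    if __name__ == "__main__":
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            tmp.write(b'x' * (8 * MB))   # stand-in for a block device
        wipe_ends(tmp.name)
        os.unlink(tmp.name)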
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index b3ce6fb6..85716a91 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -36,7 +36,8 @@ def handle(_name, cfg, _cloud, log, _args):
"grub-pc/install_devices_empty", None)
if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
- (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
+ (os.path.exists("/dev/xvda1")
+ and not os.path.exists("/dev/xvda"))):
if idevs is None:
idevs = ""
if idevs_empty is None:
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 80590118..ba1303d1 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -18,7 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from string import whitespace # pylint: disable=W0402
+from string import whitespace
import logging
import os.path
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 2e058ccd..5bc68b83 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -47,7 +47,7 @@ def handle(name, cfg, cloud, log, args):
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
else:
- if not 'phone_home' in cfg:
+ if 'phone_home' not in cfg:
log.debug(("Skipping module named %s, "
"no 'phone_home' configuration found"), name)
return
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 638daef8..09d37371 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -119,7 +119,7 @@ def load_power_state(cfg):
def doexit(sysexit):
- os._exit(sysexit) # pylint: disable=W0212
+ os._exit(sysexit)
def execmd(exe_args, output=None, data_in=None):
@@ -127,7 +127,7 @@ def execmd(exe_args, output=None, data_in=None):
proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE,
stdout=output, stderr=subprocess.STDOUT)
proc.communicate(data_in)
- ret = proc.returncode # pylint: disable=E1101
+ ret = proc.returncode
except Exception:
doexit(EXIT_FAIL)
doexit(ret)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index e290efe0..b9655749 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -28,20 +28,20 @@ from cloudinit import util
frequency = PER_ALWAYS
-def _resize_btrfs(mount_point, devpth): # pylint: disable=W0613
+def _resize_btrfs(mount_point, devpth):
return ('btrfs', 'filesystem', 'resize', 'max', mount_point)
-def _resize_ext(mount_point, devpth): # pylint: disable=W0613
+def _resize_ext(mount_point, devpth):
return ('resize2fs', devpth)
-def _resize_xfs(mount_point, devpth): # pylint: disable=W0613
+def _resize_xfs(mount_point, devpth):
return ('xfs_growfs', devpth)
-def _resize_ufs(mount_point, devpth): # pylint: disable=W0613
+def _resize_ufs(mount_point, devpth):
    return ('growfs', devpth)
# Do not use a dictionary as these commands should be able to be used
# for multiple filesystem types if possible, e.g. one command for
@@ -105,7 +105,7 @@ def handle(name, cfg, _cloud, log, args):
container = util.is_container()
if (devpth == "/dev/root" and not os.path.exists(devpth) and
- not container):
+ not container):
devpth = rootdev_from_cmdline(util.get_cmdline())
if devpth is None:
log.warn("Unable to find device '/dev/root'")
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index c771728d..7d2ec10a 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -18,22 +18,22 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-##
-## The purpose of this script is to allow cloud-init to consume
-## rightscale style userdata. rightscale user data is key-value pairs
-## in a url-query-string like format.
-##
-## for cloud-init support, there will be a key named
-## 'CLOUD_INIT_REMOTE_HOOK'.
-##
-## This cloud-config module will
-## - read the blob of data from raw user data, and parse it as key/value
-## - for each key that is found, download the content to
-## the local instance/scripts directory and set them executable.
-## - the files in that directory will be run by the user-scripts module
-## Therefore, this must run before that.
-##
-##
+#
+# The purpose of this script is to allow cloud-init to consume
+# rightscale style userdata. rightscale user data is key-value pairs
+# in a url-query-string like format.
+#
+# for cloud-init support, there will be a key named
+# 'CLOUD_INIT_REMOTE_HOOK'.
+#
+# This cloud-config module will
+# - read the blob of data from raw user data, and parse it as key/value
+# - for each key that is found, download the content to
+# the local instance/scripts directory and set them executable.
+# - the files in that directory will be run by the user-scripts module
+# Therefore, this must run before that.
+#
+#
import os
@@ -58,7 +58,7 @@ def handle(name, _cfg, cloud, log, _args):
try:
mdict = parse_qs(ud)
- if not mdict or not MY_HOOKNAME in mdict:
+ if not mdict or MY_HOOKNAME not in mdict:
log.debug(("Skipping module %s, "
"did not find %s in parsed"
" raw userdata"), name, MY_HOOKNAME)
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 0c2c6880..57486edc 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -35,7 +35,7 @@ def handle(name, cfg, cloud, log, _args):
# *.* @@syslogd.example.com
# process 'rsyslog'
- if not 'rsyslog' in cfg:
+ if 'rsyslog' not in cfg:
log.debug(("Skipping module named %s,"
" no 'rsyslog' key in configuration"), name)
return
@@ -46,7 +46,7 @@ def handle(name, cfg, cloud, log, _args):
files = []
for i, ent in enumerate(cfg['rsyslog']):
if isinstance(ent, dict):
- if not "content" in ent:
+ if "content" not in ent:
log.warn("No 'content' entry in config entry %s", i + 1)
continue
content = ent['content']
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 4a3b21af..24e33915 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -28,7 +28,7 @@ from cloudinit import distros as ds
from cloudinit import ssh_util
from cloudinit import util
-from string import letters, digits # pylint: disable=W0402
+from string import letters, digits
# We are removing certain 'painful' letters/numbers
PW_SET = (letters.translate(None, 'loLOI') +
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 64a5e3cb..4c76581c 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -75,7 +75,7 @@ def handle(_name, cfg, cloud, log, _args):
util.write_file(tgt_fn, val, tgt_perms)
for (priv, pub) in PRIV_2_PUB.iteritems():
- if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
+ if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
continue
pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index be8083db..51580633 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -55,7 +55,7 @@ def _gen_fingerprint(b64_text, hash_meth='md5'):
def _is_printable_key(entry):
if any([entry.keytype, entry.base64, entry.comment, entry.options]):
if (entry.keytype and
- entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
+ entry.keytype.lower().strip() in ['ssh-dss', 'ssh-rsa']):
return True
return False
@@ -92,9 +92,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5',
def handle(name, cfg, cloud, log, _args):
- if 'no_ssh_fingerprints' in cfg:
+ if util.is_true(cfg.get('no_ssh_fingerprints', False)):
log.debug(("Skipping module named %s, "
"logging of ssh fingerprints disabled"), name)
+ return
hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5")
(users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
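The fix above matters because the old code only checked for the key's presence and then fell through; the new code treats the value as a boolean via util.is_true() and actually returns. A rough sketch of the guard, with is_true approximated locally (not cloud-init's exact implementation):

    def is_true(val, addons=None):
        # Approximation of cloudinit.util.is_true: booleans plus common
        # true-ish strings count as enabled.
        if isinstance(val, bool):
            return val
        check = ['true', '1', 'on', 'yes']
        if addons:
            check.extend(addons)
        return str(val).lower().strip() in check

    def handle(name, cfg):
        if is_true(cfg.get('no_ssh_fingerprints', False)):
            print("Skipping module named %s, fingerprint logging disabled"
                  % name)
            return   # the early return the patch adds
        print("would compute and log authorized_keys fingerprints")

    handle('ssh-authkey-fingerprints', {'no_ssh_fingerprints': 'yes'})
    handle('ssh-authkey-fingerprints', {})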
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 5c273825..0d836f28 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -42,7 +42,7 @@ def _format_repo_value(val):
return val
-## TODO(harlowja): move to distro?
+# TODO(harlowja): move to distro?
# See man yum.conf
def _format_repository_config(repo_id, repo_config):
to_be = configobj.ConfigObj()
@@ -89,7 +89,7 @@ def handle(name, cfg, _cloud, log, _args):
repo_config = n_repo_config
missing_required = 0
for req_field in ['baseurl']:
- if not req_field in repo_config:
+ if req_field not in repo_config:
log.warn(("Repository %s does not contain a %s"
" configuration 'required' entry"),
repo_id, req_field)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 1a56dfb3..4b41220e 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -167,7 +167,7 @@ class Distro(object):
def expand_osfamily(family_list):
distros = []
for family in family_list:
- if not family in OSFAMILIES:
+ if family not in OSFAMILIES:
raise ValueError("No distibutions found for osfamily %s"
% (family))
distros.extend(OSFAMILIES[family])
@@ -218,7 +218,7 @@ class Distro(object):
fn)
if (sys_hostname and prev_hostname and
- sys_hostname != prev_hostname):
+ sys_hostname != prev_hostname):
LOG.debug("%s differs from %s, assuming user maintained hostname.",
prev_hostname_fn, sys_fn)
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 1be9d46b..5733c25a 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -137,8 +137,8 @@ class ResolvConf(object):
self._contents.append(('option', ['search', s_list, '']))
return flat_sds
- @local_domain.setter # pl51222 pylint: disable=E1101
- def local_domain(self, domain): # pl51222 pylint: disable=E0102
+ @local_domain.setter
+ def local_domain(self, domain):
self.parse()
self._remove_option('domain')
self._contents.append(('option', ['domain', str(domain), '']))
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index 1848ce2c..3a50cf87 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -53,8 +53,7 @@ class BootHookPartHandler(handlers.Handler):
util.write_file(filepath, contents.lstrip(), 0700)
return filepath
- def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
- payload, frequency): # pylint: disable=W0613
+ def handle_part(self, data, ctype, filename, payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
return
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index 4232700f..bf994e33 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -138,8 +138,7 @@ class CloudConfigPartHandler(handlers.Handler):
self.file_names = []
self.cloud_buf = None
- def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
- payload, _frequency, headers): # pylint: disable=W0613
+ def handle_part(self, data, ctype, filename, payload, frequency, headers):
if ctype == handlers.CONTENT_START:
self._reset()
return
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 30c1ed89..9755ab05 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -44,8 +44,7 @@ class ShellScriptPartHandler(handlers.Handler):
handlers.type_from_starts_with(SHELL_PREFIX),
]
- def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
- payload, frequency): # pylint: disable=W0613
+ def handle_part(self, data, ctype, filename, payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
# TODO(harlowja): maybe delete existing things here
return
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index bac4cad2..50d193c4 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -44,8 +44,7 @@ class UpstartJobPartHandler(handlers.Handler):
handlers.type_from_starts_with(UPSTART_PREFIX),
]
- def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
- payload, frequency):
+ def handle_part(self, data, ctype, filename, payload, frequency):
if ctype in handlers.CONTENT_SIGNALS:
return
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index 62999b4e..3b87b0fc 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -53,7 +53,7 @@ class Merger(object):
def _on_list(self, value, merge_with):
if (self._method == 'replace' and
- not isinstance(merge_with, (tuple, list))):
+ not isinstance(merge_with, (tuple, list))):
return merge_with
# Ok we now know that what we are merging with is a list or tuple.
diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py
index 0f3c034e..f6609d6f 100644
--- a/cloudinit/patcher.py
+++ b/cloudinit/patcher.py
@@ -41,7 +41,7 @@ def _patch_logging():
fallback_handler = QuietStreamHandler(sys.stderr)
fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT))
- def handleError(self, record): # pylint: disable=W0613
+ def handleError(self, record):
try:
fallback_handler.handle(record)
fallback_handler.flush()
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index bd75e6d8..09bc196d 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -452,7 +452,7 @@ def load_azure_ovf_pubkeys(sshnode):
continue
if (len(child.childNodes) != 1 or
- child.childNodes[0].nodeType != text_node):
+ child.childNodes[0].nodeType != text_node):
continue
cur[name] = child.childNodes[0].wholeText.strip()
@@ -521,7 +521,7 @@ def read_azure_ovf(contents):
simple = False
value = ""
if (len(child.childNodes) == 1 and
- child.childNodes[0].nodeType == dom.TEXT_NODE):
+ child.childNodes[0].nodeType == dom.TEXT_NODE):
simple = True
value = child.childNodes[0].wholeText
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index a315aae0..c26a645c 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -181,7 +181,7 @@ class DataSourceNoCloud(sources.DataSource):
# and the source of the seed was self.dsmode
# ('local' for NoCloud, 'net' for NoCloudNet')
if ('network-interfaces' in mydata['meta-data'] and
- (self.dsmode in ("local", seeded_interfaces))):
+ (self.dsmode in ("local", seeded_interfaces))):
LOG.debug("Updating network interfaces from %s", self)
self.distro.apply_network(
mydata['meta-data']['network-interfaces'])
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 77b43e17..2f53c1ba 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -107,7 +107,7 @@ class DataSourceOVF(sources.DataSource):
return True
def get_public_ssh_keys(self):
- if not 'public-keys' in self.metadata:
+ if 'public-keys' not in self.metadata:
return []
pks = self.metadata['public-keys']
if isinstance(pks, (list)):
@@ -205,7 +205,7 @@ def transport_iso9660(require_iso=True):
fullp = os.path.join("/dev/", dev)
if (fullp in mounts or
- not cdmatch.match(dev) or os.path.isdir(fullp)):
+ not cdmatch.match(dev) or os.path.isdir(fullp)):
continue
try:
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 34557f8b..e2469f6e 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -28,7 +28,7 @@ import base64
import os
import pwd
import re
-import string # pylint: disable=W0402
+import string
from cloudinit import log as logging
from cloudinit import sources
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 65ec0339..2733a2f6 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -41,7 +41,7 @@ import serial
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
- #Cloud-init Key : (SmartOS Key, Strip line endings)
+ # Cloud-init Key : (SmartOS Key, Strip line endings)
'local-hostname': ('hostname', True),
'public-keys': ('root_authorized_keys', True),
'user-script': ('user-script', False),
@@ -96,21 +96,21 @@ BUILTIN_CLOUD_CONFIG = {
'device': 'ephemeral0'}],
}
-## builtin vendor-data is a boothook that writes a script into
-## /var/lib/cloud/scripts/per-boot. *That* script then handles
-## executing the 'operator-script' and 'user-script' files
-## that cloud-init writes into /var/lib/cloud/instance/data/
-## if they exist.
-##
-## This is all very indirect, but its done like this so that at
-## some point in the future, perhaps cloud-init wouldn't do it at
-## all, but rather the vendor actually provide vendor-data that accomplished
-## their desires. (That is the point of vendor-data).
-##
-## cloud-init does cheat a bit, and write the operator-script and user-script
-## itself. It could have the vendor-script do that, but it seems better
-## to not require the image to contain a tool (mdata-get) to read those
-## keys when we have a perfectly good one inside cloud-init.
+# builtin vendor-data is a boothook that writes a script into
+# /var/lib/cloud/scripts/per-boot. *That* script then handles
+# executing the 'operator-script' and 'user-script' files
+# that cloud-init writes into /var/lib/cloud/instance/data/
+# if they exist.
+#
+# This is all very indirect, but its done like this so that at
+# some point in the future, perhaps cloud-init wouldn't do it at
+# all, but rather the vendor actually provide vendor-data that accomplished
+# their desires. (That is the point of vendor-data).
+#
+# cloud-init does cheat a bit, and write the operator-script and user-script
+# itself. It could have the vendor-script do that, but it seems better
+# to not require the image to contain a tool (mdata-get) to read those
+# keys when we have a perfectly good one inside cloud-init.
BUILTIN_VENDOR_DATA = """\
#cloud-boothook
#!/bin/sh
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index fef4d460..7d52a2e6 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -166,7 +166,7 @@ class DataSource(object):
defhost = "localhost"
domain = defdomain
- if not self.metadata or not 'local-hostname' in self.metadata:
+ if not self.metadata or 'local-hostname' not in self.metadata:
# this is somewhat questionable really.
# the cloud datasource was asked for a hostname
# and didn't have one. raising error might be more appropriate
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 0fac0335..3c6bb6aa 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -150,17 +150,38 @@ class BaseReader(object):
pass
@abc.abstractmethod
- def _path_exists(self, path):
+ def _path_read(self, path):
pass
@abc.abstractmethod
- def _path_read(self, path):
+ def _fetch_available_versions(self):
pass
@abc.abstractmethod
def _read_ec2_metadata(self):
pass
+ def _find_working_version(self, version):
+ try:
+ versions_available = self._fetch_available_versions()
+ except Exception as e:
+ LOG.warn("Unable to read openstack versions from %s due to: %s",
+ self.base_path, e)
+ versions_available = []
+
+ search_versions = [version] + list(OS_VERSIONS)
+ selected_version = OS_LATEST
+ for potential_version in search_versions:
+ if potential_version not in versions_available:
+ continue
+ selected_version = potential_version
+ break
+
+ if selected_version != version:
+ LOG.warn("Version '%s' not available, attempting to use"
+ " version '%s' instead", version, selected_version)
+ return selected_version
+
def _read_content_path(self, item):
path = item.get('content_path', '').lstrip("/")
path_pieces = path.split("/")
@@ -170,23 +191,6 @@ class BaseReader(object):
path = self._path_join(self.base_path, "openstack", *path_pieces)
return self._path_read(path)
- def _find_working_version(self, version):
- search_versions = [version] + list(OS_VERSIONS)
- for potential_version in search_versions:
- if not potential_version:
- continue
- path = self._path_join(self.base_path, "openstack",
- potential_version)
- if self._path_exists(path):
- if potential_version != version:
- LOG.debug("Version '%s' not available, attempting to use"
- " version '%s' instead", version,
- potential_version)
- return potential_version
- LOG.debug("Version '%s' not available, attempting to use '%s'"
- " instead", version, OS_LATEST)
- return OS_LATEST
-
def read_v2(self, version=None):
"""Reads a version 2 formatted location.
@@ -228,15 +232,18 @@ class BaseReader(object):
path = self._path_join(self.base_path, path)
data = None
found = False
- if self._path_exists(path):
- try:
- data = self._path_read(path)
- except IOError:
- raise NonReadable("Failed to read: %s" % path)
- found = True
+ try:
+ data = self._path_read(path)
+ except IOError as e:
+ if not required:
+ LOG.debug("Failed reading optional path %s due"
+ " to: %s", path, e)
+ else:
+ LOG.exception("Failed reading mandatory path %s", path)
else:
- if required:
- raise NonReadable("Missing mandatory path: %s" % path)
+ found = True
+ if required and not found:
+ raise NonReadable("Missing mandatory path: %s" % path)
if found and translator:
try:
data = translator(data)
@@ -304,21 +311,27 @@ class BaseReader(object):
class ConfigDriveReader(BaseReader):
def __init__(self, base_path):
super(ConfigDriveReader, self).__init__(base_path)
+ self._versions = None
def _path_join(self, base, *add_ons):
components = [base] + list(add_ons)
return os.path.join(*components)
- def _path_exists(self, path):
- return os.path.exists(path)
-
def _path_read(self, path):
return util.load_file(path)
+ def _fetch_available_versions(self):
+ if self._versions is None:
+ path = self._path_join(self.base_path, 'openstack')
+ found = [d for d in os.listdir(path)
+ if os.path.isdir(os.path.join(path, d))]
+ self._versions = tuple(found)
+ return self._versions
+
def _read_ec2_metadata(self):
path = self._path_join(self.base_path,
'ec2', 'latest', 'meta-data.json')
- if not self._path_exists(path):
+ if not os.path.exists(path):
return {}
else:
try:
@@ -338,7 +351,7 @@ class ConfigDriveReader(BaseReader):
found = {}
for name in FILES_V1.keys():
path = self._path_join(self.base_path, name)
- if self._path_exists(path):
+ if os.path.exists(path):
found[name] = path
if len(found) == 0:
raise NonReadable("%s: no files found" % (self.base_path))
@@ -400,17 +413,26 @@ class MetadataReader(BaseReader):
self.ssl_details = ssl_details
self.timeout = float(timeout)
self.retries = int(retries)
+ self._versions = None
+
+ def _fetch_available_versions(self):
+ # <baseurl>/openstack/ returns a newline separated list of versions
+ if self._versions is not None:
+ return self._versions
+ found = []
+ version_path = self._path_join(self.base_path, 'openstack')
+ content = self._path_read(version_path)
+ for line in content.splitlines():
+ line = line.strip()
+ if not line:
+ continue
+ found.append(line)
+ self._versions = tuple(found)
+ return self._versions
- def _path_read(self, path):
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout)
- return response.contents
- def _path_exists(self, path):
+ def _path_read(self, path):
- def should_retry_cb(request, cause):
+ def should_retry_cb(_request_args, cause):
try:
code = int(cause.code)
if code >= 400:
@@ -420,15 +442,12 @@ class MetadataReader(BaseReader):
pass
return True
- try:
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout,
- exception_cb=should_retry_cb)
- return response.ok()
- except IOError:
- return False
+ response = url_helper.readurl(path,
+ retries=self.retries,
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ exception_cb=should_retry_cb)
+ return response.contents
def _path_join(self, base, *add_ons):
return url_helper.combine_url(base, *add_ons)
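The openstack.py hunks above replace per-path existence probes (each one an extra URL fetch for the metadata reader) with a single listing of <base>/openstack/ plus version negotiation. A condensed sketch of that selection logic, with stand-in constants for OS_LATEST/OS_VERSIONS:

    OS_LATEST = 'latest'
    OS_VERSIONS = ('2012-08-10', '2013-04-04', '2013-10-17')   # illustrative

    def find_working_version(requested, available):
        # Prefer the requested version if advertised, otherwise fall back
        # through the known versions and finally to 'latest'.
        selected = OS_LATEST
        for candidate in [requested] + list(OS_VERSIONS):
            if candidate in available:
                selected = candidate
                break
        if selected != requested:
            print("Version %r not available, using %r instead"
                  % (requested, selected))
        return selected

    # `available` would come from one GET of <baseurl>/openstack/, whose body
    # is a newline-separated list of versions.
    assert find_working_version('2013-10-17',
                                ['2012-08-10', 'latest']) == '2012-08-10'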
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 9e071fc4..d29d480a 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -642,7 +642,7 @@ class Modules(object):
# Try the modules frequency, otherwise fallback to a known one
if not freq:
freq = mod.frequency
- if not freq in FREQUENCIES:
+ if freq not in FREQUENCIES:
freq = PER_INSTANCE
LOG.debug("Running module %s (%s) with frequency %s",
name, mod, freq)
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
index 2decbfc5..cc3d9495 100644
--- a/cloudinit/type_utils.py
+++ b/cloudinit/type_utils.py
@@ -19,8 +19,6 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# pylint: disable=C0302
import types
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 4a83169a..3074dd08 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -44,7 +44,7 @@ try:
from distutils.version import LooseVersion
import pkg_resources
_REQ = pkg_resources.get_distribution('requests')
- _REQ_VER = LooseVersion(_REQ.version) # pylint: disable=E1103
+ _REQ_VER = LooseVersion(_REQ.version)
if _REQ_VER >= LooseVersion('0.8.8'):
SSL_ENABLED = True
if _REQ_VER >= LooseVersion('0.7.0') and _REQ_VER < LooseVersion('1.0.0'):
@@ -54,7 +54,7 @@ except:
def _cleanurl(url):
- parsed_url = list(urlparse(url, scheme='http')) # pylint: disable=E1123
+ parsed_url = list(urlparse(url, scheme='http'))
if not parsed_url[1] and parsed_url[2]:
# Swap these since this seems to be a common
# occurrence when given urls like 'www.google.com'
@@ -90,7 +90,7 @@ class StringResponse(object):
self.contents = contents
self.url = None
- def ok(self, *args, **kwargs): # pylint: disable=W0613
+ def ok(self, *args, **kwargs):
if self.code != 200:
return False
return True
@@ -150,7 +150,7 @@ class UrlError(IOError):
def _get_ssl_args(url, ssl_details):
ssl_args = {}
- scheme = urlparse(url).scheme # pylint: disable=E1101
+ scheme = urlparse(url).scheme
if scheme == 'https' and ssl_details:
if not SSL_ENABLED:
LOG.warn("SSL is not supported in requests v%s, "
@@ -227,18 +227,17 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
r = requests.request(**req_args)
if check_status:
- r.raise_for_status() # pylint: disable=E1103
+ r.raise_for_status()
LOG.debug("Read from %s (%s, %sb) after %s attempts", url,
- r.status_code, len(r.content), # pylint: disable=E1103
- (i + 1))
+ r.status_code, len(r.content), (i + 1))
# Doesn't seem like we can make it use a different
# subclass for responses, so add our own backward-compat
# attrs
return UrlResponse(r)
except exceptions.RequestException as e:
if (isinstance(e, (exceptions.HTTPError))
- and hasattr(e, 'response') # This appeared in v 0.10.8
- and hasattr(e.response, 'status_code')):
+ and hasattr(e, 'response') # This appeared in v 0.10.8
+ and hasattr(e.response, 'status_code')):
excps.append(UrlError(e, code=e.response.status_code,
headers=e.response.headers))
else:
diff --git a/cloudinit/util.py b/cloudinit/util.py
index bc681f4a..bdb0f268 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -19,8 +19,6 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# pylint: disable=C0302
from StringIO import StringIO
@@ -42,7 +40,7 @@ import re
import shutil
import socket
import stat
-import string # pylint: disable=W0402
+import string
import subprocess
import sys
import tempfile
@@ -198,11 +196,11 @@ def fork_cb(child_cb, *args):
if fid == 0:
try:
child_cb(*args)
- os._exit(0) # pylint: disable=W0212
+ os._exit(0)
except:
logexc(LOG, "Failed forking and calling callback %s",
type_utils.obj_name(child_cb))
- os._exit(1) # pylint: disable=W0212
+ os._exit(1)
else:
LOG.debug("Forked child %s who will run callback %s",
fid, type_utils.obj_name(child_cb))
@@ -423,7 +421,7 @@ def get_cfg_option_list(yobj, key, default=None):
@return: The configuration option as a list of strings or default if key
is not found.
"""
- if not key in yobj:
+ if key not in yobj:
return default
if yobj[key] is None:
return []
@@ -487,7 +485,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin # pylint: disable=E1101
+ new_fp = proc.stdin
else:
raise TypeError("Invalid type for output format: %s" % outfmt)
@@ -509,7 +507,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin # pylint: disable=E1101
+ new_fp = proc.stdin
else:
raise TypeError("Invalid type for error format: %s" % errfmt)
@@ -937,7 +935,7 @@ def is_resolvable(name):
should also not exist. The random entry will be resolved inside
the search list.
"""
- global _DNS_REDIRECT_IP # pylint: disable=W0603
+ global _DNS_REDIRECT_IP
if _DNS_REDIRECT_IP is None:
badips = set()
badnames = ("does-not-exist.example.com.", "example.invalid.",
@@ -1148,7 +1146,7 @@ def chownbyname(fname, user=None, group=None):
# this returns the specific 'mode' entry, cleanly formatted, with value
def get_output_cfg(cfg, mode):
ret = [None, None]
- if not cfg or not 'output' in cfg:
+ if not cfg or 'output' not in cfg:
return ret
outcfg = cfg['output']
@@ -1532,7 +1530,7 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
(out, err) = sp.communicate(data)
except OSError as e:
raise ProcessExecutionError(cmd=args, reason=e)
- rc = sp.returncode # pylint: disable=E1101
+ rc = sp.returncode
if rc not in rcs:
raise ProcessExecutionError(stdout=out, stderr=err,
exit_code=rc,
@@ -1745,7 +1743,7 @@ def parse_mount_info(path, mountinfo_lines, log=LOG):
# Ignore mount points higher than an already seen mount
# point.
if (match_mount_point_elements is not None and
- len(match_mount_point_elements) > len(mount_point_elements)):
+ len(match_mount_point_elements) > len(mount_point_elements)):
continue
# Find the '-' which terminates a list of optional columns to
diff --git a/packages/debian/control.in b/packages/debian/control.in
index c892747c..9207e5f4 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -9,7 +9,6 @@ Build-Depends: debhelper (>= 9),
python (>= 2.6.6-3~),
python-nose,
pyflakes,
- pylint,
python-setuptools,
python-selinux,
python-cheetah,
diff --git a/pylintrc b/pylintrc
deleted file mode 100644
index ee886510..00000000
--- a/pylintrc
+++ /dev/null
@@ -1,19 +0,0 @@
-[General]
-init-hook='import sys; sys.path.append("tests/")'
-
-[MESSAGES CONTROL]
-# See: http://pylint-messages.wikidot.com/all-codes
-# W0142: *args and **kwargs are fine.
-# W0511: TODOs in code comments are fine.
-# W0702: No exception type(s) specified
-# W0703: Catch "Exception"
-# C0103: Invalid name
-# C0111: Missing docstring
-disable=W0142,W0511,W0702,W0703,C0103,C0111
-
-[REPORTS]
-reports=no
-include-ids=yes
-
-[FORMAT]
-max-line-length=79
diff --git a/setup.py b/setup.py
index 4d20f16c..bd41bc91 100755
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@ def tiny_p(cmd, capture=True):
sp = subprocess.Popen(cmd, stdout=stdout,
stderr=stderr, stdin=None)
(out, err) = sp.communicate()
- ret = sp.returncode # pylint: disable=E1101
+ ret = sp.returncode
if ret not in [0]:
raise RuntimeError("Failed running %s [rc=%s] (%s, %s)"
% (cmd, ret, out, err))
diff --git a/test-requirements.txt b/test-requirements.txt
index 4be0211d..2edb8066 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,6 +1,5 @@
httpretty>=0.7.1
mocker
nose
-pep8
+pep8==1.5.7
pyflakes
-pylint
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 03065c8b..17965488 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -18,8 +18,7 @@ class FakeModule(handlers.Handler):
def list_types(self):
return self.types
- def handle_part(self, _data, ctype, filename, # pylint: disable=W0221
- payload, frequency):
+ def handle_part(self, data, ctype, filename, payload, frequency):
pass
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 88c82d5e..e992a006 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -235,7 +235,7 @@ class TestAzureDataSource(MockerTestCase):
self.assertEqual(dsrc.userdata_raw, mydata)
def test_no_datasource_expected(self):
- #no source should be found if no seed_dir and no devs
+ # no source should be found if no seed_dir and no devs
data = {}
dsrc = self._get_ds({})
ret = dsrc.get_data()
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 14274562..8bcc026c 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -50,7 +50,7 @@ class TestNoCloudDataSource(MockerTestCase):
self.assertTrue(ret)
def test_fs_label(self):
- #find_devs_with should not be called ff fs_label is None
+ # find_devs_with should not be called ff fs_label is None
ds = DataSourceNoCloud.DataSourceNoCloud
class PsuedoException(Exception):
@@ -74,7 +74,7 @@ class TestNoCloudDataSource(MockerTestCase):
self.assertFalse(ret)
def test_no_datasource_expected(self):
- #no source should be found if no cmdline, config, and fs_label=None
+ # no source should be found if no cmdline, config, and fs_label=None
sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
ds = DataSourceNoCloud.DataSourceNoCloud
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index f43cbec8..530fba20 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -67,8 +67,8 @@ OSTACK_META = {
CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
OS_FILES = {
- 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2012-08-10/user_data': USER_DATA,
+ 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
+ 'openstack/latest/user_data': USER_DATA,
'openstack/content/0000': CONTENT_0,
'openstack/content/0001': CONTENT_1,
'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
@@ -78,6 +78,9 @@ OS_FILES = {
EC2_FILES = {
'latest/user-data': USER_DATA,
}
+EC2_VERSIONS = [
+ 'latest',
+]
def _register_uris(version, ec2_files, ec2_meta, os_files):
@@ -85,6 +88,9 @@ def _register_uris(version, ec2_files, ec2_meta, os_files):
same data returned by the openstack metadata service (and ec2 service)."""
def match_ec2_url(uri, headers):
+ path = uri.path.strip("/")
+ if len(path) == 0:
+ return (200, headers, "\n".join(EC2_VERSIONS))
path = uri.path.lstrip("/")
if path in ec2_files:
return (200, headers, ec2_files.get(path))
@@ -110,11 +116,20 @@ def _register_uris(version, ec2_files, ec2_meta, os_files):
return (200, headers, str(value))
return (404, headers, '')
- def get_request_callback(method, uri, headers):
- uri = urlparse(uri)
+ def match_os_uri(uri, headers):
+ path = uri.path.strip("/")
+ if path == 'openstack':
+ return (200, headers, "\n".join([openstack.OS_LATEST]))
path = uri.path.lstrip("/")
if path in os_files:
return (200, headers, os_files.get(path))
+ return (404, headers, '')
+
+ def get_request_callback(method, uri, headers):
+ uri = urlparse(uri)
+ path = uri.path.lstrip("/").split("/")
+ if path[0] == 'openstack':
+ return match_os_uri(uri, headers)
return match_ec2_url(uri, headers)
hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
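The test changes above split URL handling so anything under /openstack/ gets its own matcher and a bare openstack/ path serves the version listing. A self-contained sketch of the same httpretty callback pattern (paths and payloads are made up, not the project's fixtures):

    import re

    import httpretty
    import requests

    FILES = {
        'openstack/latest/meta_data.json': '{"uuid": "not-a-real-uuid"}',
    }

    def request_callback(request, uri, response_headers):
        # Dispatch on the path portion of the mocked URL.
        path = uri.split('169.254.169.254/', 1)[1].strip('/')
        if path == 'openstack':
            return (200, response_headers, 'latest')   # version listing
        if path in FILES:
            return (200, response_headers, FILES[path])
        return (404, response_headers, '')

    httpretty.enable()
    httpretty.register_uri(httpretty.GET,
                           re.compile(r'http://169.254.169.254/.*'),
                           body=request_callback)
    try:
        print(requests.get('http://169.254.169.254/openstack').text)
        print(requests.get(
            'http://169.254.169.254/openstack/latest/meta_data.json').text)
    finally:
        httpretty.disable()
        httpretty.reset()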
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index c24c790e..db6aa0e8 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -26,8 +26,8 @@ package_mirrors = [
unknown_arch_info
]
-gpmi = distros._get_package_mirror_info # pylint: disable=W0212
-gapmi = distros._get_arch_package_mirror_info # pylint: disable=W0212
+gpmi = distros._get_package_mirror_info
+gapmi = distros._get_arch_package_mirror_info
class TestGenericDistro(helpers.FilesystemMockingTestCase):
@@ -193,7 +193,7 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
'security': 'http://security-mirror2-intel'})
-#def _get_package_mirror_info(mirror_info, availability_zone=None,
+# def _get_package_mirror_info(mirror_info, availability_zone=None,
# mirror_filter=util.search_for_mirror):
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index f6dc4521..5d0636d1 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -53,7 +53,7 @@ class TestDisabled(MockerTestCase):
self.handle = cc_growpart.handle
def test_mode_off(self):
- #Test that nothing is done if mode is off.
+ # Test that nothing is done if mode is off.
# this really only verifies that resizer_factory isn't called
config = {'growpart': {'mode': 'off'}}
@@ -109,7 +109,7 @@ class TestConfig(MockerTestCase):
self.assertTrue(isinstance(ret, cc_growpart.ResizeGrowPart))
def test_handle_with_no_growpart_entry(self):
- #if no 'growpart' entry in config, then mode=auto should be used
+ # if no 'growpart' entry in config, then mode=auto should be used
myresizer = object()
@@ -141,7 +141,7 @@ class TestResize(MockerTestCase):
self.mocker.order()
def test_simple_devices(self):
- #test simple device list
+ # test simple device list
# this patches out devent2dev, os.stat, and device_part_info
# so in the end, doesn't test a lot
devs = ["/dev/XXda1", "/dev/YYda2"]
@@ -187,7 +187,7 @@ class TestResize(MockerTestCase):
find("/dev/YYda2", resized)[1])
self.assertEqual(cc_growpart.RESIZE.SKIPPED,
find(enoent[0], resized)[1])
- #self.assertEqual(resize_calls,
+ # self.assertEqual(resize_calls,
# [("/dev/XXda", "1", "/dev/XXda1"),
# ("/dev/YYda", "2", "/dev/YYda2")])
finally:
@@ -203,8 +203,6 @@ def simple_device_part_info(devpath):
class Bunch(object):
- st_mode = None # fix pylint complaint
-
def __init__(self, **kwds):
self.__dict__.update(kwds)
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
index 4b7b2112..2f86b8f8 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/test_handler/test_handler_power_state.py
@@ -67,7 +67,7 @@ def check_lps_ret(psc_return, mode=None):
cmd = psc_return[0]
timeout = psc_return[1]
- if not 'shutdown' in psc_return[0][0]:
+ if 'shutdown' not in psc_return[0][0]:
errs.append("string 'shutdown' not in cmd")
if mode is not None:
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
index 156441c7..21b89c34 100644
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py
@@ -24,7 +24,7 @@ class TestConfig(helpers.FilesystemMockingTestCase):
'epel-testing': {
'name': 'Extra Packages for Enterprise Linux 5 - Testing',
# Missing this should cause the repo not to be written
- #'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
+ # 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
'enabled': False,
'gpgcheck': True,
'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 17704f8e..07b610f7 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -11,7 +11,7 @@ import glob
import os
import random
import re
-import string # pylint: disable=W0402
+import string
SOURCE_PAT = "source*.*yaml"
EXPECTED_PAT = "expected%s.yaml"
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 0cb41520..35e92445 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,5 +1,3 @@
-# pylint: disable=C0301
-# the mountinfo data lines are too long
import os
import stat
import yaml
@@ -18,7 +16,7 @@ class FakeSelinux(object):
self.match_what = match_what
self.restored = []
- def matchpathcon(self, path, mode): # pylint: disable=W0613
+ def matchpathcon(self, path, mode):
if path == self.match_what:
return
else:
@@ -27,7 +25,7 @@ class FakeSelinux(object):
def is_selinux_enabled(self):
return True
- def restorecon(self, path, recursive): # pylint: disable=W0613
+ def restorecon(self, path, recursive):
self.restored.append(path)
diff --git a/tools/hacking.py b/tools/hacking.py
index 26a07c53..e7797564 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -71,7 +71,7 @@ def cloud_import_alphabetical(physical_line, line_number, lines):
# with or without "as y"
length = [2, 4]
if (len(split_line) in length and len(split_previous) in length and
- split_line[0] == "import" and split_previous[0] == "import"):
+ split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
return (0, "N306: imports not in alphabetical order (%s, %s)"
% (split_previous[1], split_line[1]))
@@ -154,7 +154,7 @@ def add_cloud():
if not inspect.isfunction(function):
continue
if name.startswith("cloud_"):
- exec("pep8.%s = %s" % (name, name)) # pylint: disable=W0122
+ exec("pep8.%s = %s" % (name, name))
if __name__ == "__main__":
# NOVA based 'hacking.py' error codes start with an N
@@ -163,7 +163,7 @@ if __name__ == "__main__":
pep8.current_file = current_file
pep8.readlines = readlines
try:
- pep8._main() # pylint: disable=W0212
+ pep8._main()
finally:
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
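For reference, the N306 check touched above compares the second token of consecutive import lines. A standalone approximation of that rule (the helper name and message text are illustrative):

    def imports_out_of_order(lines):
        # Yield (line_number, message) when consecutive "import x" /
        # "import x as y" lines are not alphabetical, roughly like N306.
        previous = None
        for number, line in enumerate(lines, start=1):
            split_line = line.strip().split()
            if len(split_line) in (2, 4) and split_line[0] == "import":
                if previous and split_line[1] < previous[1]:
                    yield (number,
                           "imports not in alphabetical order (%s, %s)"
                           % (previous[1], split_line[1]))
                previous = split_line
            else:
                previous = None

    for lineno, msg in imports_out_of_order(["import os", "import logging"]):
        print("line %d: %s" % (lineno, msg))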
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index c79f0598..dfbc2a71 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -23,7 +23,7 @@ import json
import logging
import os
import random
-import string # pylint: disable=W0402
+import string
import sys
import yaml
@@ -306,7 +306,7 @@ class UserDataHandler(object):
blob = "\n".join(lines)
return blob.strip()
- def get_data(self, params, who, **kwargs): # pylint: disable=W0613
+ def get_data(self, params, who, **kwargs):
if not params:
return self._get_user_blob(who=who)
return NOT_IMPL_RESPONSE
@@ -427,8 +427,8 @@ def extract_opts():
def setup_fetchers(opts):
- global meta_fetcher # pylint: disable=W0603
- global user_fetcher # pylint: disable=W0603
+ global meta_fetcher
+ global user_fetcher
meta_fetcher = MetaDataHandler(opts)
user_fetcher = UserDataHandler(opts)
diff --git a/tools/run-pep8 b/tools/run-pep8
index cfce5edd..d0a131f6 100755
--- a/tools/run-pep8
+++ b/tools/run-pep8
@@ -13,7 +13,7 @@ else
base=`pwd`/tools/
fi
-IGNORE="E501" # Line too long (these are caught by pylint)
+IGNORE=""
# King Arthur: Be quiet! ... Be Quiet! I Order You to Be Quiet.
IGNORE="$IGNORE,E121" # Continuation line indentation is not a multiple of four
diff --git a/tools/run-pylint b/tools/run-pylint
deleted file mode 100755
index 0fe0c64a..00000000
--- a/tools/run-pylint
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-if [ $# -eq 0 ]; then
- files=( bin/cloud-init $(find * -name "*.py" -type f) )
-else
- files=( "$@" );
-fi
-
-RC_FILE="pylintrc"
-if [ ! -f $RC_FILE ]; then
- RC_FILE="../pylintrc"
-fi
-
-cmd=(
- pylint
- --rcfile=$RC_FILE
- --disable=R
- --disable=I
- --dummy-variables-rgx="_"
- "${files[@]}"
-)
-
-echo -e "\nRunning pylint:"
-echo "${cmd[@]}"
-"${cmd[@]}"
-