summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorScott Moser <smoser@ubuntu.com>2016-03-04 01:55:28 -0500
committerScott Moser <smoser@ubuntu.com>2016-03-04 01:55:28 -0500
commit1a9a408a7bf6d7a1a06254ad2939bd549acba69b (patch)
treeec4f76ea5260e269c83addbfafa4376ea2678059
parentbbf105baafbe788f7babbda188b513180424e256 (diff)
parent8092805079d011093724d87e9485e5bf79479a72 (diff)
downloadvyos-cloud-init-1a9a408a7bf6d7a1a06254ad2939bd549acba69b.tar.gz
vyos-cloud-init-1a9a408a7bf6d7a1a06254ad2939bd549acba69b.zip
merge with trunk
-rw-r--r--ChangeLog15
-rw-r--r--Makefile46
-rwxr-xr-xbin/cloud-init43
-rw-r--r--cloudinit/config/cc_apt_configure.py6
-rw-r--r--cloudinit/config/cc_disk_setup.py31
-rw-r--r--cloudinit/config/cc_final_message.py6
-rw-r--r--cloudinit/config/cc_grub_dpkg.py9
-rw-r--r--cloudinit/config/cc_keys_to_console.py2
-rw-r--r--cloudinit/config/cc_landscape.py12
-rw-r--r--cloudinit/config/cc_lxd.py85
-rw-r--r--cloudinit/config/cc_mounts.py12
-rw-r--r--cloudinit/config/cc_power_state_change.py2
-rw-r--r--cloudinit/config/cc_puppet.py6
-rw-r--r--cloudinit/config/cc_resizefs.py2
-rw-r--r--cloudinit/config/cc_rh_subscription.py4
-rw-r--r--cloudinit/config/cc_set_hostname.py2
-rw-r--r--cloudinit/config/cc_ssh.py7
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py6
-rw-r--r--cloudinit/config/cc_update_hostname.py2
-rw-r--r--cloudinit/config/cc_yum_add_repo.py2
-rw-r--r--cloudinit/distros/__init__.py18
-rw-r--r--cloudinit/distros/arch.py6
-rw-r--r--cloudinit/distros/debian.py5
-rw-r--r--cloudinit/distros/freebsd.py4
-rw-r--r--cloudinit/distros/gentoo.py4
-rw-r--r--cloudinit/distros/parsers/hostname.py2
-rw-r--r--cloudinit/distros/parsers/resolv_conf.py2
-rw-r--r--cloudinit/distros/parsers/sys_conf.py7
-rw-r--r--cloudinit/filters/launch_index.py2
-rw-r--r--cloudinit/helpers.py33
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py2
-rw-r--r--cloudinit/sources/DataSourceAzure.py23
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py2
-rw-r--r--cloudinit/sources/DataSourceDigitalOcean.py4
-rw-r--r--cloudinit/sources/DataSourceEc2.py12
-rw-r--r--cloudinit/sources/DataSourceMAAS.py17
-rw-r--r--cloudinit/sources/DataSourceNoCloud.py4
-rw-r--r--cloudinit/sources/DataSourceNone.py4
-rw-r--r--cloudinit/sources/DataSourceOVF.py24
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py7
-rw-r--r--cloudinit/sources/DataSourceSmartOS.py262
-rw-r--r--cloudinit/sources/helpers/azure.py26
-rw-r--r--cloudinit/sources/helpers/vmware/imc/config_nic.py11
-rw-r--r--cloudinit/ssh_util.py3
-rw-r--r--cloudinit/stages.py18
-rw-r--r--cloudinit/url_helper.py6
-rw-r--r--cloudinit/util.py23
-rw-r--r--config/cloud.cfg1
-rw-r--r--doc/examples/cloud-config-datasources.txt7
-rw-r--r--doc/examples/cloud-config-lxd.txt28
-rw-r--r--doc/examples/cloud-config-mount-points.txt2
-rw-r--r--doc/examples/cloud-config-rsyslog.txt4
-rw-r--r--doc/examples/cloud-config-user-groups.txt6
-rwxr-xr-xpackages/bddeb11
-rw-r--r--packages/debian/cloud-init.postinst16
-rw-r--r--packages/debian/cloud-init.preinst20
-rw-r--r--packages/debian/control.in3
-rwxr-xr-xpackages/debian/rules.in14
-rwxr-xr-xsetup.py77
-rw-r--r--systemd/cloud-config.service2
-rw-r--r--systemd/cloud-final.service3
-rwxr-xr-xsystemd/cloud-init-generator128
-rw-r--r--systemd/cloud-init-local.service2
-rw-r--r--systemd/cloud-init.service2
-rw-r--r--systemd/cloud-init.target6
-rw-r--r--tests/unittests/test_data.py5
-rw-r--r--tests/unittests/test_datasource/test_altcloud.py23
-rw-r--r--tests/unittests/test_datasource/test_azure.py15
-rw-r--r--tests/unittests/test_datasource/test_azure_helper.py78
-rw-r--r--tests/unittests/test_datasource/test_configdrive.py32
-rw-r--r--tests/unittests/test_datasource/test_maas.py16
-rw-r--r--tests/unittests/test_datasource/test_opennebula.py2
-rw-r--r--tests/unittests/test_datasource/test_smartos.py84
-rw-r--r--tests/unittests/test_distros/test_user_data_normalize.py29
-rw-r--r--tests/unittests/test_handler/test_handler_lxd.py75
-rw-r--r--tests/unittests/test_handler/test_handler_power_state.py5
-rw-r--r--tests/unittests/test_handler/test_handler_seed_random.py3
-rw-r--r--tests/unittests/test_handler/test_handler_snappy.py3
-rw-r--r--tests/unittests/test_sshutil.py3
-rw-r--r--tests/unittests/test_templating.py3
-rwxr-xr-xtools/hacking.py16
-rwxr-xr-xtools/mock-meta.py27
-rwxr-xr-xtools/run-pep851
-rwxr-xr-xtools/run-pyflakes18
-rwxr-xr-xtools/run-pyflakes32
-rwxr-xr-xtools/validate-yaml.py2
-rw-r--r--tox.ini19
87 files changed, 1107 insertions, 567 deletions
diff --git a/ChangeLog b/ChangeLog
index f41412fa..2f1f9f87 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -69,6 +69,21 @@
as install from server or desktop ISO. (LP: #1177432)
- cc_mounts: use 'nofail' if system uses systemd. (LP: #1514485)
- Azure: get instance id from dmi instead of SharedConfig (LP: #1506187)
+ - systemd/power_state: fix power_state to work even if cloud-final
+ exited non-zero (LP: #1449318)
+ - SmartOS: Add support for Joyent LX-Brand Zones (LP: #1540965)
+ [Robert C Jennings]
+ - systemd: support using systemd-detect-virt to detect container
+ (LP: #1539016) [Martin Pitt]
+ - docs: fix lock_passwd documentation [Robert C Jennings]
+ - Azure: Handle escaped quotes in WALinuxAgentShim.find_endpoint.
+ (LP: #1488891) [Dan Watkins]
+ - lxd: add support for setting up lxd using 'lxd init' (LP: #1522879)
+ - Add Image Customization Parser for VMware vSphere Hypervisor
+ Support. [Sankar Tanguturi]
+ - timezone: use a symlink rather than copy for /etc/localtime
+ unless it is already a file (LP: #1543025).
+
0.7.6:
- open 0.7.6
- Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/Makefile b/Makefile
index bb0c5253..32c50aee 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
CWD=$(shell pwd)
-PY_FILES=$(shell find cloudinit bin tests tools -name "*.py" -type f )
-PY_FILES+="bin/cloud-init"
+PYVER ?= 3
+noseopts ?= -v
YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f )
YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f )
@@ -10,17 +10,42 @@ CODE_VERSION=$(shell python -c "from cloudinit import version; print version.ver
PIP_INSTALL := pip install
+ifeq ($(PYVER),3)
+ pyflakes = pyflakes3
+ unittests = unittest3
+ yaml = yaml
+else
+ifeq ($(PYVER),2)
+ pyflakes = pyflakes
+ unittests = unittest
+else
+ pyflakes = pyflakes pyflakes3
+ unittests = unittest unittest3
+endif
+endif
+
ifeq ($(distro),)
distro = redhat
endif
-all: test check_version
+all: check
+
+check: check_version pep8 $(pyflakes) test $(yaml)
pep8:
- @$(CWD)/tools/run-pep8 $(PY_FILES)
+ @$(CWD)/tools/run-pep8
pyflakes:
- @$(CWD)/tools/tox-venv py34 pyflakes $(PY_FILES)
+ @$(CWD)/tools/run-pyflakes
+
+pyflakes3:
+ @$(CWD)/tools/run-pyflakes3
+
+unittest: clean_pyc
+ nosetests $(noseopts) tests/unittests
+
+unittest3: clean_pyc
+ nosetests3 $(noseopts) tests/unittests
pip-requirements:
@echo "Installing cloud-init dependencies..."
@@ -30,9 +55,7 @@ pip-test-requirements:
@echo "Installing cloud-init test dependencies..."
$(PIP_INSTALL) -r "$@.txt" -q
-test: clean_pyc
- @echo "Running tests..."
- @nosetests $(noseopts) tests/
+test: $(unittests)
check_version:
@if [ "$(CHANGELOG_VERSION)" != "$(CODE_VERSION)" ]; then \
@@ -43,9 +66,6 @@ check_version:
clean_pyc:
@find . -type f -name "*.pyc" -delete
-2to3:
- 2to3 $(PY_FILES)
-
clean: clean_pyc
rm -rf /var/log/cloud-init.log /var/lib/cloud/
@@ -58,5 +78,5 @@ rpm:
deb:
./packages/bddeb
-.PHONY: test pyflakes 2to3 clean pep8 rpm deb yaml check_version
-.PHONY: pip-test-requirements pip-requirements clean_pyc
+.PHONY: test pyflakes pyflakes3 clean pep8 rpm deb yaml check_version
+.PHONY: pip-test-requirements pip-requirements clean_pyc unittest unittest3
diff --git a/bin/cloud-init b/bin/cloud-init
index 9b90c45e..7f665e7e 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -194,7 +194,7 @@ def main_init(name, args):
if args.debug:
# Reset so that all the debug handlers are closed out
LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ " longer be active shortly"))
logging.resetLogging()
logging.setupLogging(init.cfg)
apply_reporting_cfg(init.cfg)
@@ -276,9 +276,9 @@ def main_init(name, args):
# This may run user-data handlers and/or perform
# url downloads and such as needed.
(ran, _results) = init.cloudify().run('consume_data',
- init.consume_data,
- args=[PER_INSTANCE],
- freq=PER_INSTANCE)
+ init.consume_data,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
if not ran:
# Just consume anything that is set to run per-always
# if nothing ran in the per-instance code
@@ -349,7 +349,7 @@ def main_modules(action_name, args):
if args.debug:
# Reset so that all the debug handlers are closed out
LOG.debug(("Logging being reset, this logger may no"
- " longer be active shortly"))
+ " longer be active shortly"))
logging.resetLogging()
logging.setupLogging(mods.cfg)
apply_reporting_cfg(init.cfg)
@@ -534,7 +534,8 @@ def status_wrapper(name, args, data_d=None, link_d=None):
errors.extend(v1[m].get('errors', []))
atomic_write_json(result_path,
- {'v1': {'datasource': v1['datasource'], 'errors': errors}})
+ {'v1': {'datasource': v1['datasource'],
+ 'errors': errors}})
util.sym_link(os.path.relpath(result_path, link_d), result_link,
force=True)
@@ -578,13 +579,13 @@ def main():
# These settings are used for the 'config' and 'final' stages
parser_mod = subparsers.add_parser('modules',
- help=('activates modules '
- 'using a given configuration key'))
+ help=('activates modules using '
+ 'a given configuration key'))
parser_mod.add_argument("--mode", '-m', action='store',
- help=("module configuration name "
- "to use (default: %(default)s)"),
- default='config',
- choices=('init', 'config', 'final'))
+ help=("module configuration name "
+ "to use (default: %(default)s)"),
+ default='config',
+ choices=('init', 'config', 'final'))
parser_mod.set_defaults(action=('modules', main_modules))
# These settings are used when you want to query information
@@ -600,22 +601,22 @@ def main():
# This subcommand allows you to run a single module
parser_single = subparsers.add_parser('single',
- help=('run a single module '))
+ help=('run a single module '))
parser_single.set_defaults(action=('single', main_single))
parser_single.add_argument("--name", '-n', action="store",
- help="module name to run",
- required=True)
+ help="module name to run",
+ required=True)
parser_single.add_argument("--frequency", action="store",
- help=("frequency of the module"),
- required=False,
- choices=list(FREQ_SHORT_NAMES.keys()))
+ help=("frequency of the module"),
+ required=False,
+ choices=list(FREQ_SHORT_NAMES.keys()))
parser_single.add_argument("--report", action="store_true",
help="enable reporting",
required=False)
parser_single.add_argument("module_args", nargs="*",
- metavar='argument',
- help=('any additional arguments to'
- ' pass to this module'))
+ metavar='argument',
+ help=('any additional arguments to'
+ ' pass to this module'))
parser_single.set_defaults(action=('single', main_single))
args = parser.parse_args()
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 9e9e9e26..702977cb 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -91,7 +91,8 @@ def handle(name, cfg, cloud, log, _args):
if matchcfg:
matcher = re.compile(matchcfg).search
else:
- matcher = lambda f: False
+ def matcher(x):
+ return False
errors = add_sources(cfg['apt_sources'], params,
aa_repo_match=matcher)
@@ -173,7 +174,8 @@ def add_sources(srclist, template_params=None, aa_repo_match=None):
template_params = {}
if aa_repo_match is None:
- aa_repo_match = lambda f: False
+ def aa_repo_match(x):
+ return False
errorlist = []
for ent in srclist:
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index d5b0d1d7..0ecc2e4c 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -167,11 +167,12 @@ def enumerate_disk(device, nodeps=False):
parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
for part in parts:
- d = {'name': None,
- 'type': None,
- 'fstype': None,
- 'label': None,
- }
+ d = {
+ 'name': None,
+ 'type': None,
+ 'fstype': None,
+ 'label': None,
+ }
for key, value in value_splitter(part):
d[key.lower()] = value
@@ -701,11 +702,12 @@ def lookup_force_flag(fs):
"""
A force flag might be -F or -F, this look it up
"""
- flags = {'ext': '-F',
- 'btrfs': '-f',
- 'xfs': '-f',
- 'reiserfs': '-f',
- }
+ flags = {
+ 'ext': '-F',
+ 'btrfs': '-f',
+ 'xfs': '-f',
+ 'reiserfs': '-f',
+ }
if 'ext' in fs.lower():
fs = 'ext'
@@ -824,10 +826,11 @@ def mkfs(fs_cfg):
# Create the commands
if fs_cmd:
- fs_cmd = fs_cfg['cmd'] % {'label': label,
- 'filesystem': fs_type,
- 'device': device,
- }
+ fs_cmd = fs_cfg['cmd'] % {
+ 'label': label,
+ 'filesystem': fs_type,
+ 'device': device,
+ }
else:
# Find the mkfs command
mkfs_cmd = util.which("mkfs.%s" % fs_type)
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index ad957e12..4a51476f 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -28,9 +28,9 @@ frequency = PER_ALWAYS
# Jinja formated default message
FINAL_MESSAGE_DEF = (
- "## template: jinja\n"
- "Cloud-init v. {{version}} finished at {{timestamp}}."
- " Datasource {{datasource}}. Up {{uptime}} seconds"
+ "## template: jinja\n"
+ "Cloud-init v. {{version}} finished at {{timestamp}}."
+ " Datasource {{datasource}}. Up {{uptime}} seconds"
)
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index 456597af..3c2d9985 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -37,12 +37,11 @@ def handle(name, cfg, _cloud, log, _args):
return
idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
- idevs_empty = util.get_cfg_option_str(mycfg,
- "grub-pc/install_devices_empty", None)
+ idevs_empty = util.get_cfg_option_str(
+ mycfg, "grub-pc/install_devices_empty", None)
if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
- (os.path.exists("/dev/xvda1")
- and not os.path.exists("/dev/xvda"))):
+ (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
if idevs is None:
idevs = ""
if idevs_empty is None:
@@ -66,7 +65,7 @@ def handle(name, cfg, _cloud, log, _args):
(idevs, idevs_empty))
log.debug("Setting grub debconf-set-selections with '%s','%s'" %
- (idevs, idevs_empty))
+ (idevs, idevs_empty))
try:
util.subp(['debconf-set-selections'], dconf_sel)
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index f1c1adff..aa844ee9 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -48,7 +48,7 @@ def handle(name, cfg, cloud, log, _args):
"ssh_fp_console_blacklist", [])
key_blacklist = util.get_cfg_option_list(cfg,
"ssh_key_console_blacklist",
- ["ssh-dss"])
+ ["ssh-dss"])
try:
cmd = [helper_path]
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 0b9d846e..68fcb27f 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -38,12 +38,12 @@ distros = ['ubuntu']
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
LSC_BUILTIN_CFG = {
- 'client': {
- 'log_level': "info",
- 'url': "https://landscape.canonical.com/message-system",
- 'ping_url': "http://landscape.canonical.com/ping",
- 'data_path': "/var/lib/landscape/client",
- }
+ 'client': {
+ 'log_level': "info",
+ 'url': "https://landscape.canonical.com/message-system",
+ 'ping_url': "http://landscape.canonical.com/ping",
+ 'data_path': "/var/lib/landscape/client",
+ }
}
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
new file mode 100644
index 00000000..63b8fb63
--- /dev/null
+++ b/cloudinit/config/cc_lxd.py
@@ -0,0 +1,85 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+This module initializes lxd using 'lxd init'
+
+Example config:
+ #cloud-config
+ lxd:
+ init:
+ network_address: <ip addr>
+ network_port: <port>
+ storage_backend: <zfs/dir>
+ storage_create_device: <dev>
+ storage_create_loop: <size>
+ storage_pool: <name>
+ trust_password: <password>
+"""
+
+from cloudinit import util
+
+
+def handle(name, cfg, cloud, log, args):
+ # Get config
+ lxd_cfg = cfg.get('lxd')
+ if not lxd_cfg:
+ log.debug("Skipping module named %s, not present or disabled by cfg")
+ return
+ if not isinstance(lxd_cfg, dict):
+ log.warn("lxd config must be a dictionary. found a '%s'",
+ type(lxd_cfg))
+ return
+
+ init_cfg = lxd_cfg.get('init')
+ if not isinstance(init_cfg, dict):
+ log.warn("lxd/init config must be a dictionary. found a '%s'",
+ type(init_cfg))
+ init_cfg = {}
+
+ if not init_cfg:
+ log.debug("no lxd/init config. disabled.")
+ return
+
+ packages = []
+ # Ensure lxd is installed
+ if not util.which("lxd"):
+ packages.append('lxd')
+
+ # if using zfs, get the utils
+ if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
+ packages.append('zfs')
+
+ if len(packages):
+ try:
+ cloud.distro.install_packages(packages)
+ except util.ProcessExecutionError as exc:
+ log.warn("failed to install packages %s: %s", packages, exc)
+ return
+
+ # Set up lxd if init config is given
+ init_keys = (
+ 'network_address', 'network_port', 'storage_backend',
+ 'storage_create_device', 'storage_create_loop',
+ 'storage_pool', 'trust_password')
+ cmd = ['lxd', 'init', '--auto']
+ for k in init_keys:
+ if init_cfg.get(k):
+ cmd.extend(["--%s=%s" %
+ (k.replace('_', '-'), str(init_cfg[k]))])
+ util.subp(cmd)
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 11089d8d..4fe3ee21 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -204,12 +204,12 @@ def setup_swapfile(fname, size=None, maxsize=None):
try:
util.ensure_dir(tdir)
util.log_time(LOG.debug, msg, func=util.subp,
- args=[['sh', '-c',
- ('rm -f "$1" && umask 0066 && '
- '{ fallocate -l "${2}M" "$1" || '
- ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
- 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
- 'setup_swap', fname, mbsize]])
+ args=[['sh', '-c',
+ ('rm -f "$1" && umask 0066 && '
+ '{ fallocate -l "${2}M" "$1" || '
+ ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
+ 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
+ 'setup_swap', fname, mbsize]])
except Exception as e:
raise IOError("Failed %s: %s" % (msg, e))
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 7d9567e3..cc3f7f70 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -105,7 +105,7 @@ def handle(_name, cfg, _cloud, log, _args):
log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
- util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
+ util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
condition, execmd, [args, devnull_fp])
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 4501598e..774d3322 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -36,8 +36,8 @@ def _autostart_puppet(log):
# Set puppet to automatically start
if os.path.exists('/etc/default/puppet'):
util.subp(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'], capture=False)
+ '-e', 's/^START=.*/START=yes/',
+ '/etc/default/puppet'], capture=False)
elif os.path.exists('/bin/systemctl'):
util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
capture=False)
@@ -65,7 +65,7 @@ def handle(name, cfg, cloud, log, _args):
" doing nothing."))
elif install:
log.debug(("Attempting to install puppet %s,"),
- version if version else 'latest')
+ version if version else 'latest')
cloud.distro.install_packages(('puppet', version))
# ... and then update the puppet configuration
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index cbc07853..2a2a9f59 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -166,7 +166,7 @@ def handle(name, cfg, _cloud, log, args):
func=do_resize, args=(resize_cmd, log))
else:
util.log_time(logfunc=log.debug, msg="Resizing",
- func=do_resize, args=(resize_cmd, log))
+ func=do_resize, args=(resize_cmd, log))
action = 'Resized'
if resize_root == NOBLOCK:
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 3b30c47e..6f474aed 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -127,8 +127,8 @@ class SubscriptionManager(object):
return False, not_bool
if (self.servicelevel is not None) and \
- ((not self.auto_attach)
- or (util.is_false(str(self.auto_attach)))):
+ ((not self.auto_attach) or
+ (util.is_false(str(self.auto_attach)))):
no_auto = ("The service-level key must be used in conjunction "
"with the auto-attach key. Please re-run with "
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 5d7f4331..f43d8d5a 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -24,7 +24,7 @@ from cloudinit import util
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
log.debug(("Configuration option 'preserve_hostname' is set,"
- " not setting the hostname in module %s"), name)
+ " not setting the hostname in module %s"), name)
return
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 5bd2dec6..d24e43c0 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -30,9 +30,10 @@ from cloudinit import distros as ds
from cloudinit import ssh_util
from cloudinit import util
-DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
-"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" "
-"rather than the user \\\"root\\\".\';echo;sleep 10\"")
+DISABLE_ROOT_OPTS = (
+ "no-port-forwarding,no-agent-forwarding,"
+ "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
+ " rather than the user \\\"root\\\".\';echo;sleep 10\"")
GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index d3dd1f32..15703efe 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -41,10 +41,10 @@ def handle(name, cfg, cloud, log, _args):
if not tpl_fn_name:
raise RuntimeError(("No hosts template could be"
" found for distro %s") %
- (cloud.distro.osfamily))
+ (cloud.distro.osfamily))
templater.render_to_file(tpl_fn_name, '/etc/hosts',
- {'hostname': hostname, 'fqdn': fqdn})
+ {'hostname': hostname, 'fqdn': fqdn})
elif manage_hosts == "localhost":
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
@@ -57,4 +57,4 @@ def handle(name, cfg, cloud, log, _args):
cloud.distro.update_etc_hosts(hostname, fqdn)
else:
log.debug(("Configuration option 'manage_etc_hosts' is not set,"
- " not managing /etc/hosts in module %s"), name)
+ " not managing /etc/hosts in module %s"), name)
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index e396ba13..5b78afe1 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -29,7 +29,7 @@ frequency = PER_ALWAYS
def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
log.debug(("Configuration option 'preserve_hostname' is set,"
- " not updating the hostname in module %s"), name)
+ " not updating the hostname in module %s"), name)
return
(hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 3b821af9..64fba869 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -92,7 +92,7 @@ def handle(name, cfg, _cloud, log, _args):
for req_field in ['baseurl']:
if req_field not in repo_config:
log.warn(("Repository %s does not contain a %s"
- " configuration 'required' entry"),
+ " configuration 'required' entry"),
repo_id, req_field)
missing_required += 1
if not missing_required:
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 71884b32..4bad8708 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -211,8 +211,8 @@ class Distro(object):
# If the system hostname is different than the previous
# one or the desired one lets update it as well
- if (not sys_hostname) or (sys_hostname == prev_hostname
- and sys_hostname != hostname):
+ if ((not sys_hostname) or (sys_hostname == prev_hostname and
+ sys_hostname != hostname)):
update_files.append(sys_fn)
# If something else has changed the hostname after we set it
@@ -221,7 +221,7 @@ class Distro(object):
if (sys_hostname and prev_hostname and
sys_hostname != prev_hostname):
LOG.info("%s differs from %s, assuming user maintained hostname.",
- prev_hostname_fn, sys_fn)
+ prev_hostname_fn, sys_fn)
return
# Remove duplicates (incase the previous config filename)
@@ -289,7 +289,7 @@ class Distro(object):
def _bring_up_interface(self, device_name):
cmd = ['ifup', device_name]
LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ device_name, cmd)
try:
(_out, err) = util.subp(cmd)
if len(err):
@@ -548,7 +548,7 @@ class Distro(object):
for member in members:
if not util.is_user(member):
LOG.warn("Unable to add group member '%s' to group '%s'"
- "; user does not exist.", member, name)
+ "; user does not exist.", member, name)
continue
util.subp(['usermod', '-a', '-G', name, member])
@@ -886,7 +886,7 @@ def fetch(name):
locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])
if not locs:
raise ImportError("No distribution found for distro %s (searched %s)"
- % (name, looked_locs))
+ % (name, looked_locs))
mod = importer.import_module(locs[0])
cls = getattr(mod, 'Distro')
return cls
@@ -897,5 +897,9 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
util.write_file(tz_conf, str(tz).rstrip() + "\n")
# This ensures that the correct tz will be used for the system
if tz_local and tz_file:
- util.copy(tz_file, tz_local)
+ # use a symlink if there exists a symlink or tz_local is not present
+ if os.path.islink(tz_local) or not os.path.exists(tz_local):
+ os.symlink(tz_file, tz_local)
+ else:
+ util.copy(tz_file, tz_local)
return
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 45fcf26f..93a2e008 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -74,7 +74,7 @@ class Distro(distros.Distro):
'Interface': dev,
'IP': info.get('bootproto'),
'Address': "('%s/%s')" % (info.get('address'),
- info.get('netmask')),
+ info.get('netmask')),
'Gateway': info.get('gateway'),
'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '')
}
@@ -86,7 +86,7 @@ class Distro(distros.Distro):
if nameservers:
util.write_file(self.resolve_conf_fn,
- convert_resolv_conf(nameservers))
+ convert_resolv_conf(nameservers))
return dev_names
@@ -102,7 +102,7 @@ class Distro(distros.Distro):
def _bring_up_interface(self, device_name):
cmd = ['netctl', 'restart', device_name]
LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ device_name, cmd)
try:
(_out, err) = util.subp(cmd)
if len(err):
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 6d3a82bf..db5890b1 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -159,8 +159,9 @@ class Distro(distros.Distro):
# Allow the output of this to flow outwards (ie not be captured)
util.log_time(logfunc=LOG.debug,
- msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp,
- args=(cmd,), kwargs={'env': e, 'capture': False})
+ msg="apt-%s [%s]" % (command, ' '.join(cmd)),
+ func=util.subp,
+ args=(cmd,), kwargs={'env': e, 'capture': False})
def update_package_sources(self):
self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 4c484639..72012056 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -205,8 +205,8 @@ class Distro(distros.Distro):
redact_opts = ['passwd']
for key, val in kwargs.items():
- if (key in adduser_opts and val
- and isinstance(val, six.string_types)):
+ if (key in adduser_opts and val and
+ isinstance(val, six.string_types)):
adduser_cmd.extend([adduser_opts[key], val])
# Redact certain fields from the logs
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 9e80583c..6267dd6e 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -66,7 +66,7 @@ class Distro(distros.Distro):
def _bring_up_interface(self, device_name):
cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
LOG.debug("Attempting to run bring up interface %s using command %s",
- device_name, cmd)
+ device_name, cmd)
try:
(_out, err) = util.subp(cmd)
if len(err):
@@ -88,7 +88,7 @@ class Distro(distros.Distro):
(_out, err) = util.subp(cmd)
if len(err):
LOG.warn("Running %s resulted in stderr output: %s", cmd,
- err)
+ err)
except util.ProcessExecutionError:
util.logexc(LOG, "Running interface command %s failed", cmd)
return False
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index 84a1de42..efb185d4 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -84,5 +84,5 @@ class HostnameConf(object):
hostnames_found.add(head)
if len(hostnames_found) > 1:
raise IOError("Multiple hostnames (%s) found!"
- % (hostnames_found))
+ % (hostnames_found))
return entries
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 8aee03a4..2ed13d9c 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -132,7 +132,7 @@ class ResolvConf(object):
# Some hard limit on 256 chars total
raise ValueError(("Adding %r would go beyond the "
"256 maximum search list character limit")
- % (search_domain))
+ % (search_domain))
self._remove_option('search')
self._contents.append(('option', ['search', s_list, '']))
return flat_sds
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
index d795e12f..6157cf32 100644
--- a/cloudinit/distros/parsers/sys_conf.py
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -77,8 +77,7 @@ class SysConf(configobj.ConfigObj):
quot_func = None
if value[0] in ['"', "'"] and value[-1] in ['"', "'"]:
if len(value) == 1:
- quot_func = (lambda x:
- self._get_single_quote(x) % x)
+ quot_func = (lambda x: self._get_single_quote(x) % x)
else:
# Quote whitespace if it isn't the start + end of a shell command
if value.strip().startswith("$(") and value.strip().endswith(")"):
@@ -91,10 +90,10 @@ class SysConf(configobj.ConfigObj):
# to use single quotes which won't get expanded...
if re.search(r"[\n\"']", value):
quot_func = (lambda x:
- self._get_triple_quote(x) % x)
+ self._get_triple_quote(x) % x)
else:
quot_func = (lambda x:
- self._get_single_quote(x) % x)
+ self._get_single_quote(x) % x)
else:
quot_func = pipes.quote
if not quot_func:
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
index 5bebd318..baecdac9 100644
--- a/cloudinit/filters/launch_index.py
+++ b/cloudinit/filters/launch_index.py
@@ -61,7 +61,7 @@ class Filter(object):
discarded += 1
LOG.debug(("Discarding %s multipart messages "
"which do not match launch index %s"),
- discarded, self.wanted_idx)
+ discarded, self.wanted_idx)
new_message = copy.copy(message)
new_message.set_payload(new_msgs)
new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs))
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 5e99d185..0cf982f3 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -139,9 +139,10 @@ class FileSemaphores(object):
# but the item had run before we did canon_sem_name.
if cname != name and os.path.exists(self._get_path(name, freq)):
LOG.warn("%s has run without canonicalized name [%s].\n"
- "likely the migrator has not yet run. It will run next boot.\n"
- "run manually with: cloud-init single --name=migrator"
- % (name, cname))
+ "likely the migrator has not yet run. "
+ "It will run next boot.\n"
+ "run manually with: cloud-init single --name=migrator"
+ % (name, cname))
return True
return False
@@ -335,19 +336,19 @@ class Paths(object):
template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
self.template_tpl = os.path.join(template_dir, '%s.tmpl')
self.lookups = {
- "handlers": "handlers",
- "scripts": "scripts",
- "vendor_scripts": "scripts/vendor",
- "sem": "sem",
- "boothooks": "boothooks",
- "userdata_raw": "user-data.txt",
- "userdata": "user-data.txt.i",
- "obj_pkl": "obj.pkl",
- "cloud_config": "cloud-config.txt",
- "vendor_cloud_config": "vendor-cloud-config.txt",
- "data": "data",
- "vendordata_raw": "vendor-data.txt",
- "vendordata": "vendor-data.txt.i",
+ "handlers": "handlers",
+ "scripts": "scripts",
+ "vendor_scripts": "scripts/vendor",
+ "sem": "sem",
+ "boothooks": "boothooks",
+ "userdata_raw": "user-data.txt",
+ "userdata": "user-data.txt.i",
+ "obj_pkl": "obj.pkl",
+ "cloud_config": "cloud-config.txt",
+ "vendor_cloud_config": "vendor-cloud-config.txt",
+ "data": "data",
+ "vendordata_raw": "vendor-data.txt",
+ "vendordata": "vendor-data.txt.i",
}
# Set when a datasource becomes active
self.datasource = ds
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 60d58d6d..cd61df31 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -284,7 +284,7 @@ class DataSourceAltCloud(sources.DataSource):
# In the future 'dsmode' like behavior can be added to offer user
# the ability to run before networking.
datasources = [
- (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index bd80a8a6..2af0ad9b 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -38,7 +38,8 @@ LOG = logging.getLogger(__name__)
DS_NAME = 'Azure'
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
AGENT_START = ['service', 'walinuxagent', 'start']
-BOUNCE_COMMAND = ['sh', '-xc',
+BOUNCE_COMMAND = [
+ 'sh', '-xc',
"i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
BUILTIN_DS_CONFIG = {
@@ -91,9 +92,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
"""
policy = cfg['hostname_bounce']['policy']
previous_hostname = get_hostname(hostname_command)
- if (not util.is_true(cfg.get('set_hostname'))
- or util.is_false(policy)
- or (previous_hostname == temp_hostname and policy != 'force')):
+ if (not util.is_true(cfg.get('set_hostname')) or
+ util.is_false(policy) or
+ (previous_hostname == temp_hostname and policy != 'force')):
yield None
return
set_hostname(temp_hostname, hostname_command)
@@ -123,8 +124,8 @@ class DataSourceAzureNet(sources.DataSource):
with temporary_hostname(temp_hostname, self.ds_cfg,
hostname_command=hostname_command) \
as previous_hostname:
- if (previous_hostname is not None
- and util.is_true(self.ds_cfg.get('set_hostname'))):
+ if (previous_hostname is not None and
+ util.is_true(self.ds_cfg.get('set_hostname'))):
cfg = self.ds_cfg['hostname_bounce']
try:
perform_hostname_bounce(hostname=temp_hostname,
@@ -152,7 +153,8 @@ class DataSourceAzureNet(sources.DataSource):
else:
bname = str(pk['fingerprint'] + ".crt")
fp_files += [os.path.join(ddir, bname)]
- LOG.debug("ssh authentication: using fingerprint from fabirc")
+ LOG.debug("ssh authentication: "
+ "using fingerprint from fabirc")
missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
func=wait_for_files,
@@ -506,7 +508,7 @@ def read_azure_ovf(contents):
raise BrokenAzureDataSource("invalid xml: %s" % e)
results = find_child(dom.documentElement,
- lambda n: n.localName == "ProvisioningSection")
+ lambda n: n.localName == "ProvisioningSection")
if len(results) == 0:
raise NonAzureDataSource("No ProvisioningSection")
@@ -516,7 +518,8 @@ def read_azure_ovf(contents):
provSection = results[0]
lpcs_nodes = find_child(provSection,
- lambda n: n.localName == "LinuxProvisioningConfigurationSet")
+ lambda n:
+ n.localName == "LinuxProvisioningConfigurationSet")
if len(results) == 0:
raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
@@ -633,7 +636,7 @@ class NonAzureDataSource(Exception):
# Used to match classes to dependencies
datasources = [
- (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index eb474079..e3916208 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -39,7 +39,7 @@ FS_TYPES = ('vfat', 'iso9660')
LABEL_TYPES = ('config-2',)
POSSIBLE_MOUNTS = ('sr', 'cd')
OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
+ for i in range(0, 2)))
class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 5d47564d..12e863d2 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -101,8 +101,8 @@ class DataSourceDigitalOcean(sources.DataSource):
# Used to match classes to dependencies
datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
- ]
+ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
# Return a list of data sources that match this set of dependencies
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 0032d06c..3ef2c6af 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -61,12 +61,12 @@ class DataSourceEc2(sources.DataSource):
if not self.wait_for_metadata_service():
return False
start_time = time.time()
- self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
- self.metadata_address)
+ self.userdata_raw = \
+ ec2.get_instance_userdata(self.api_ver, self.metadata_address)
self.metadata = ec2.get_instance_metadata(self.api_ver,
self.metadata_address)
LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
+ int(time.time() - start_time))
return True
except Exception:
util.logexc(LOG, "Failed reading from metadata address %s",
@@ -132,13 +132,13 @@ class DataSourceEc2(sources.DataSource):
start_time = time.time()
url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
+ timeout=timeout, status_cb=LOG.warn)
if url:
LOG.debug("Using metadata source: '%s'", url2base[url])
else:
LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
+ urls, int(time.time() - start_time))
self.metadata_address = url2base.get(url)
return bool(url)
@@ -206,7 +206,7 @@ class DataSourceEc2(sources.DataSource):
# Used to match classes to dependencies
datasources = [
- (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index cfc59ca5..d828f078 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -254,7 +254,7 @@ class MAASSeedDirMalformed(Exception):
# Used to match classes to dependencies
datasources = [
- (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
@@ -275,17 +275,18 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Interact with MAAS DS')
parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
+ help="specify DS config file", default=None)
parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
+ help="the consumer key to auth with", default=None)
parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
+ help="the token key to auth with", default=None)
parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
+ help="the consumer secret (likely '')", default="")
parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
+ help="the token secret to auth with", default=None)
parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)", default=MD_VERSION)
+ help="the apiver to use ("" can be used)",
+ default=MD_VERSION)
subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
subcmds.add_parser('crawl', help="crawl the datasource")
@@ -297,7 +298,7 @@ if __name__ == "__main__":
args = parser.parse_args()
creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
+ 'token_secret': args.tsec, 'consumer_secret': args.csec}
if args.config:
cfg = util.read_conf(args.config)
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 4dffe6e6..4cad6877 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -263,8 +263,8 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
# Used to match classes to dependencies
datasources = [
- (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
- (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
+ (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index 12a8a992..d1a62b2a 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -47,8 +47,8 @@ class DataSourceNone(sources.DataSource):
# Used to match classes to dependencies
datasources = [
- (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
- (DataSourceNone, []),
+ (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceNone, []),
]
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index d07f6219..23996b4a 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -69,16 +69,15 @@ class DataSourceOVF(sources.DataSource):
LOG.debug("No system-product-name found")
elif 'vmware' in system_type.lower():
LOG.debug("VMware Virtualization Platform found")
- if not util.get_cfg_option_bool(self.sys_cfg,
- "disable_vmware_customization",
- True):
+ if not util.get_cfg_option_bool(
+ self.sys_cfg, "disable_vmware_customization", True):
deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
"libdeployPkgPlugin.so")
if deployPkgPluginPath:
- vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug,
- msg="waiting for configuration file",
- func=wait_for_imc_cfg_file,
- args=("/tmp", "cust.cfg"))
+ vmwareImcConfigFilePath = util.log_time(
+ logfunc=LOG.debug,
+ msg="waiting for configuration file",
+ func=wait_for_imc_cfg_file, args=("/tmp", "cust.cfg"))
if vmwareImcConfigFilePath:
LOG.debug("Found VMware DeployPkg Config File at %s" %
@@ -97,7 +96,8 @@ class DataSourceOVF(sources.DataSource):
nicConfigurator.configure()
vmwarePlatformFound = True
except Exception as inst:
- LOG.debug("Error while parsing the Customization Config File")
+ LOG.debug("Error while parsing the Customization "
+ "Config File: %s", inst)
finally:
dirPath = os.path.dirname(vmwareImcConfigFilePath)
shutil.rmtree(dirPath)
@@ -336,14 +336,14 @@ def get_properties(contents):
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
+ lambda n: n.localName == "PropertySection")
if len(propSections) == 0:
raise XmlError("No 'PropertySection's")
props = {}
propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
+ (lambda n: n.localName == "Property"))
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
@@ -370,8 +370,8 @@ class XmlError(Exception):
# Used to match classes to dependencies
datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
- (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index ac2c3b45..681f3a96 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -149,8 +149,8 @@ class BrokenContextDiskDir(Exception):
class OpenNebulaNetwork(object):
REG_DEV_MAC = re.compile(
- r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
- re.MULTILINE | re.DOTALL)
+ r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
+ re.MULTILINE | re.DOTALL)
def __init__(self, ip, context):
self.ip = ip
@@ -404,7 +404,8 @@ def read_context_disk_dir(source_dir, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
results['metadata']['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
+ if len(l) and not
+ l.startswith("#")]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index c9b497df..5edab152 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -20,10 +20,13 @@
# Datasource for provisioning on SmartOS. This works on Joyent
# and public/private Clouds using SmartOS.
#
-# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests.
+# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
# The meta-data is transmitted via key/value pairs made by
# requests on the console. For example, to get the hostname, you
# would send "GET hostname" on /dev/ttyS1.
+# For Linux Guests running in LX-Brand Zones on SmartOS hosts
+# a socket (/native/.zonecontrol/metadata.sock) is used instead
+# of a serial console.
#
# Certain behavior is defined by the DataDictionary
# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
@@ -34,6 +37,8 @@ import contextlib
import os
import random
import re
+import socket
+import stat
import serial
@@ -46,6 +51,7 @@ LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
# Cloud-init Key : (SmartOS Key, Strip line endings)
+ 'instance-id': ('sdc:uuid', True),
'local-hostname': ('hostname', True),
'public-keys': ('root_authorized_keys', True),
'user-script': ('user-script', False),
@@ -76,6 +82,7 @@ DS_CFG_PATH = ['datasource', DS_NAME]
#
BUILTIN_DS_CONFIG = {
'serial_device': '/dev/ttyS1',
+ 'metadata_sockfile': '/native/.zonecontrol/metadata.sock',
'seed_timeout': 60,
'no_base64_decode': ['root_authorized_keys',
'motd_sys_info',
@@ -83,7 +90,7 @@ BUILTIN_DS_CONFIG = {
'user-data',
'user-script',
'sdc:datacenter_name',
- ],
+ 'sdc:uuid'],
'base64_keys': [],
'base64_all': False,
'disk_aliases': {'ephemeral0': '/dev/vdb'},
@@ -94,7 +101,7 @@ BUILTIN_CLOUD_CONFIG = {
'ephemeral0': {'table_type': 'mbr',
'layout': False,
'overwrite': False}
- },
+ },
'fs_setup': [{'label': 'ephemeral0',
'filesystem': 'ext3',
'device': 'ephemeral0'}],
@@ -150,17 +157,27 @@ class DataSourceSmartOS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.is_smartdc = None
-
self.ds_cfg = util.mergemanydict([
self.ds_cfg,
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
self.metadata = {}
- self.cfg = BUILTIN_CLOUD_CONFIG
- self.seed = self.ds_cfg.get("serial_device")
- self.seed_timeout = self.ds_cfg.get("serial_timeout")
+ # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
+ # report 'BrandZ virtual linux' as the kernel version
+ if os.uname()[3].lower() == 'brandz virtual linux':
+ LOG.debug("Host is SmartOS, guest in Zone")
+ self.is_smartdc = True
+ self.smartos_type = 'lx-brand'
+ self.cfg = {}
+ self.seed = self.ds_cfg.get("metadata_sockfile")
+ else:
+ self.is_smartdc = True
+ self.smartos_type = 'kvm'
+ self.seed = self.ds_cfg.get("serial_device")
+ self.cfg = BUILTIN_CLOUD_CONFIG
+ self.seed_timeout = self.ds_cfg.get("serial_timeout")
self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
self.b64_keys = self.ds_cfg.get('base64_keys')
self.b64_all = self.ds_cfg.get('base64_all')
@@ -170,12 +187,49 @@ class DataSourceSmartOS(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
+ def _get_seed_file_object(self):
+ if not self.seed:
+ raise AttributeError("seed device is not set")
+
+ if self.smartos_type == 'lx-brand':
+ if not stat.S_ISSOCK(os.stat(self.seed).st_mode):
+ LOG.debug("Seed %s is not a socket", self.seed)
+ return None
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(self.seed)
+ return sock.makefile('rwb')
+ else:
+ if not stat.S_ISCHR(os.stat(self.seed).st_mode):
+ LOG.debug("Seed %s is not a character device")
+ return None
+ ser = serial.Serial(self.seed, timeout=self.seed_timeout)
+ if not ser.isOpen():
+ raise SystemError("Unable to open %s" % self.seed)
+ return ser
+ return None
+
+ def _set_provisioned(self):
+ '''Mark the instance provisioning state as successful.
+
+ When run in a zone, the host OS will look for /var/svc/provisioning
+ to be renamed as /var/svc/provision_success. This should be done
+ after meta-data is successfully retrieved and from this point
+ the host considers the provision of the zone to be a success and
+ keeps the zone running.
+ '''
+
+ LOG.debug('Instance provisioning state set as successful')
+ svc_path = '/var/svc'
+ if os.path.exists('/'.join([svc_path, 'provisioning'])):
+ os.rename('/'.join([svc_path, 'provisioning']),
+ '/'.join([svc_path, 'provision_success']))
+
def get_data(self):
md = {}
ud = ""
if not device_exists(self.seed):
- LOG.debug("No serial device '%s' found for SmartOS datasource",
+ LOG.debug("No metadata device '%s' found for SmartOS datasource",
self.seed)
return False
@@ -185,29 +239,36 @@ class DataSourceSmartOS(sources.DataSource):
LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)")
return False
- dmi_info = dmi_data()
- if dmi_info is False:
- LOG.debug("No dmidata utility found")
- return False
-
- system_uuid, system_type = tuple(dmi_info)
- if 'smartdc' not in system_type.lower():
- LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
+ # SDC KVM instances will provide dmi data, LX-brand does not
+ if self.smartos_type == 'kvm':
+ dmi_info = dmi_data()
+ if dmi_info is False:
+ LOG.debug("No dmidata utility found")
+ return False
+
+ system_type = dmi_info
+ if 'smartdc' not in system_type.lower():
+ LOG.debug("Host is not on SmartOS. system_type=%s",
+ system_type)
+ return False
+ LOG.debug("Host is SmartOS, guest in KVM")
+
+ seed_obj = self._get_seed_file_object()
+ if seed_obj is None:
+ LOG.debug('Seed file object not found.')
return False
- self.is_smartdc = True
- md['instance-id'] = system_uuid
+ with contextlib.closing(seed_obj) as seed:
+ b64_keys = self.query('base64_keys', seed, strip=True, b64=False)
+ if b64_keys is not None:
+ self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
- b64_keys = self.query('base64_keys', strip=True, b64=False)
- if b64_keys is not None:
- self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
+ b64_all = self.query('base64_all', seed, strip=True, b64=False)
+ if b64_all is not None:
+ self.b64_all = util.is_true(b64_all)
- b64_all = self.query('base64_all', strip=True, b64=False)
- if b64_all is not None:
- self.b64_all = util.is_true(b64_all)
-
- for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
- smartos_noun, strip = attribute
- md[ci_noun] = self.query(smartos_noun, strip=strip)
+ for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
+ smartos_noun, strip = attribute
+ md[ci_noun] = self.query(smartos_noun, seed, strip=strip)
# @datadictionary: This key may contain a program that is written
# to a file in the filesystem of the guest on each boot and then
@@ -240,7 +301,7 @@ class DataSourceSmartOS(sources.DataSource):
# Handle the cloud-init regular meta
if not md['local-hostname']:
- md['local-hostname'] = system_uuid
+ md['local-hostname'] = md['instance-id']
ud = None
if md['user-data']:
@@ -257,6 +318,8 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
self.vendordata_raw = md['vendor-data']
+
+ self._set_provisioned()
return True
def device_name_to_device(self, name):
@@ -268,40 +331,64 @@ class DataSourceSmartOS(sources.DataSource):
def get_instance_id(self):
return self.metadata['instance-id']
- def query(self, noun, strip=False, default=None, b64=None):
+ def query(self, noun, seed_file, strip=False, default=None, b64=None):
if b64 is None:
if noun in self.smartos_no_base64:
b64 = False
elif self.b64_all or noun in self.b64_keys:
b64 = True
- return query_data(noun=noun, strip=strip, seed_device=self.seed,
- seed_timeout=self.seed_timeout, default=default,
- b64=b64)
+ return self._query_data(noun, seed_file, strip=strip,
+ default=default, b64=b64)
+ def _query_data(self, noun, seed_file, strip=False,
+ default=None, b64=None):
+ """Makes a request via "GET <NOUN>"
-def device_exists(device):
- """Symplistic method to determine if the device exists or not"""
- return os.path.exists(device)
+ In the response, the first line is the status, while subsequent
+ lines are is the value. A blank line with a "." is used to
+ indicate end of response.
+ If the response is expected to be base64 encoded, then set
+ b64encoded to true. Unfortantely, there is no way to know if
+ something is 100% encoded, so this method relies on being told
+ if the data is base64 or not.
+ """
-def get_serial(seed_device, seed_timeout):
- """This is replaced in unit testing, allowing us to replace
- serial.Serial with a mocked class.
+ if not noun:
+ return False
- The timeout value of 60 seconds should never be hit. The value
- is taken from SmartOS own provisioning tools. Since we are reading
- each line individually up until the single ".", the transfer is
- usually very fast (i.e. microseconds) to get the response.
- """
- if not seed_device:
- raise AttributeError("seed_device value is not set")
+ response = JoyentMetadataClient(seed_file).get_metadata(noun)
+
+ if response is None:
+ return default
+
+ if b64 is None:
+ b64 = self._query_data('b64-%s' % noun, seed_file, b64=False,
+ default=False, strip=True)
+ b64 = util.is_true(b64)
+
+ resp = None
+ if b64 or strip:
+ resp = "".join(response).rstrip()
+ else:
+ resp = "".join(response)
- ser = serial.Serial(seed_device, timeout=seed_timeout)
- if not ser.isOpen():
- raise SystemError("Unable to open %s" % seed_device)
+ if b64:
+ try:
+ return util.b64d(resp)
+ # Bogus input produces different errors in Python 2 and 3;
+ # catch both.
+ except (TypeError, binascii.Error):
+ LOG.warn("Failed base64 decoding key '%s'", noun)
+ return resp
- return ser
+ return resp
+
+
+def device_exists(device):
+ """Symplistic method to determine if the device exists or not"""
+ return os.path.exists(device)
class JoyentMetadataFetchException(Exception):
@@ -320,8 +407,8 @@ class JoyentMetadataClient(object):
r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
r'( (?P<payload>.+))?)')
- def __init__(self, serial):
- self.serial = serial
+ def __init__(self, metasource):
+ self.metasource = metasource
def _checksum(self, body):
return '{0:08x}'.format(
@@ -356,67 +443,30 @@ class JoyentMetadataClient(object):
util.b64e(metadata_key))
msg = 'V2 {0} {1} {2}\n'.format(
len(message_body), self._checksum(message_body), message_body)
- LOG.debug('Writing "%s" to serial port.', msg)
- self.serial.write(msg.encode('ascii'))
- response = self.serial.readline().decode('ascii')
- LOG.debug('Read "%s" from serial port.', response)
- return self._get_value_from_frame(request_id, response)
-
-
-def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
- b64=None):
- """Makes a request to via the serial console via "GET <NOUN>"
-
- In the response, the first line is the status, while subsequent lines
- are is the value. A blank line with a "." is used to indicate end of
- response.
-
- If the response is expected to be base64 encoded, then set b64encoded
- to true. Unfortantely, there is no way to know if something is 100%
- encoded, so this method relies on being told if the data is base64 or
- not.
- """
- if not noun:
- return False
-
- with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser:
- client = JoyentMetadataClient(ser)
- response = client.get_metadata(noun)
-
- if response is None:
- return default
-
- if b64 is None:
- b64 = query_data('b64-%s' % noun, seed_device=seed_device,
- seed_timeout=seed_timeout, b64=False,
- default=False, strip=True)
- b64 = util.is_true(b64)
-
- resp = None
- if b64 or strip:
- resp = "".join(response).rstrip()
- else:
- resp = "".join(response)
-
- if b64:
- try:
- return util.b64d(resp)
- # Bogus input produces different errors in Python 2 and 3; catch both.
- except (TypeError, binascii.Error):
- LOG.warn("Failed base64 decoding key '%s'", noun)
- return resp
+ LOG.debug('Writing "%s" to metadata transport.', msg)
+ self.metasource.write(msg.encode('ascii'))
+ self.metasource.flush()
+
+ response = bytearray()
+ response.extend(self.metasource.read(1))
+ while response[-1:] != b'\n':
+ response.extend(self.metasource.read(1))
+ response = response.rstrip().decode('ascii')
+ LOG.debug('Read "%s" from metadata transport.', response)
+
+ if 'SUCCESS' not in response:
+ return None
- return resp
+ return self._get_value_from_frame(request_id, response)
def dmi_data():
- sys_uuid = util.read_dmi_data("system-uuid")
sys_type = util.read_dmi_data("system-product-name")
- if not sys_uuid or not sys_type:
+ if not sys_type:
return None
- return (sys_uuid.lower(), sys_type)
+ return sys_type
def write_boot_content(content, content_f, link=None, shebang=False,
@@ -462,7 +512,7 @@ def write_boot_content(content, content_f, link=None, shebang=False,
except Exception as e:
util.logexc(LOG, ("Failed to identify script type for %s" %
- content_f, e))
+ content_f, e))
if link:
try:
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index d90c22fd..018cac6d 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -197,6 +197,21 @@ class WALinuxAgentShim(object):
self.openssl_manager.clean_up()
@staticmethod
+ def get_ip_from_lease_value(lease_value):
+ unescaped_value = lease_value.replace('\\', '')
+ if len(unescaped_value) > 4:
+ hex_string = ''
+ for hex_pair in unescaped_value.split(':'):
+ if len(hex_pair) == 1:
+ hex_pair = '0' + hex_pair
+ hex_string += hex_pair
+ packed_bytes = struct.pack(
+ '>L', int(hex_string.replace(':', ''), 16))
+ else:
+ packed_bytes = unescaped_value.encode('utf-8')
+ return socket.inet_ntoa(packed_bytes)
+
+ @staticmethod
def find_endpoint():
LOG.debug('Finding Azure endpoint...')
content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
@@ -206,16 +221,7 @@ class WALinuxAgentShim(object):
value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
if value is None:
raise Exception('No endpoint found in DHCP config.')
- if ':' in value:
- hex_string = ''
- for hex_pair in value.split(':'):
- if len(hex_pair) == 1:
- hex_pair = '0' + hex_pair
- hex_string += hex_pair
- value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
- else:
- value = value.encode('utf-8')
- endpoint_ip_address = socket.inet_ntoa(value)
+ endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
return endpoint_ip_address
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 172a1649..8c5c08cf 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -19,7 +19,6 @@
import logging
import os
-import subprocess
import re
from cloudinit import util
@@ -47,12 +46,12 @@ class NicConfigurator:
"""
primary_nics = [nic for nic in self.nics if nic.primary]
if not primary_nics:
- return None
+ return None
elif len(primary_nics) > 1:
- raise Exception('There can only be one primary nic',
+ raise Exception('There can only be one primary nic',
[nic.mac for nic in primary_nics])
else:
- return primary_nics[0]
+ return primary_nics[0]
def find_devices(self):
"""
@@ -186,8 +185,8 @@ class NicConfigurator:
lines = []
for addr in addrs:
- lines.append(' up route -A inet6 add default gw %s metric 10000' %
- addr.gateway)
+ lines.append(' up route -A inet6 add default gw '
+ '%s metric 10000' % addr.gateway)
return lines
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 9b2f5ed5..c74a7ae2 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -31,7 +31,8 @@ LOG = logging.getLogger(__name__)
DEF_SSHD_CFG = "/etc/ssh/sshd_config"
# taken from openssh source key.c/key_type_from_name
-VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
+VALID_KEY_TYPES = (
+ "rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
"ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
"ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
"ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com",
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 9f192c8d..dbcf3d55 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -509,13 +509,13 @@ class Init(object):
def consume_data(self, frequency=PER_INSTANCE):
# Consume the userdata first, because we need want to let the part
# handlers run first (for merging stuff)
- with events.ReportEventStack(
- "consume-user-data", "reading and applying user-data",
- parent=self.reporter):
+ with events.ReportEventStack("consume-user-data",
+ "reading and applying user-data",
+ parent=self.reporter):
self._consume_userdata(frequency)
- with events.ReportEventStack(
- "consume-vendor-data", "reading and applying vendor-data",
- parent=self.reporter):
+ with events.ReportEventStack("consume-vendor-data",
+ "reading and applying vendor-data",
+ parent=self.reporter):
self._consume_vendordata(frequency)
# Perform post-consumption adjustments so that
@@ -655,7 +655,7 @@ class Modules(object):
else:
raise TypeError(("Failed to read '%s' item in config,"
" unknown type %s") %
- (item, type_utils.obj_name(item)))
+ (item, type_utils.obj_name(item)))
return module_list
def _fixup_modules(self, raw_mods):
@@ -762,8 +762,8 @@ class Modules(object):
if skipped:
LOG.info("Skipping modules %s because they are not verified "
- "on distro '%s'. To run anyway, add them to "
- "'unverified_modules' in config.", skipped, d_name)
+ "on distro '%s'. To run anyway, add them to "
+ "'unverified_modules' in config.", skipped, d_name)
if forced:
LOG.info("running unverified_modules: %s", forced)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index f2e1390e..936f7da5 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -252,9 +252,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
# attrs
return UrlResponse(r)
except exceptions.RequestException as e:
- if (isinstance(e, (exceptions.HTTPError))
- and hasattr(e, 'response') # This appeared in v 0.10.8
- and hasattr(e.response, 'status_code')):
+ if (isinstance(e, (exceptions.HTTPError)) and
+ hasattr(e, 'response') and # This appeared in v 0.10.8
+ hasattr(e.response, 'status_code')):
excps.append(UrlError(e, code=e.response.status_code,
headers=e.response.headers,
url=url))
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 83c2c0d2..e7407ea4 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -76,7 +76,9 @@ FALSE_STRINGS = ('off', '0', 'no', 'false')
# Helper utils to see if running in a container
-CONTAINER_TESTS = ('running-in-container', 'lxc-is-container')
+CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
+ ['running-in-container'],
+ ['lxc-is-container'])
def decode_binary(blob, encoding='utf-8'):
@@ -610,7 +612,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
def make_url(scheme, host, port=None,
- path='', params='', query='', fragment=''):
+ path='', params='', query='', fragment=''):
pieces = []
pieces.append(scheme or '')
@@ -802,8 +804,8 @@ def load_yaml(blob, default=None, allowed=(dict,)):
blob = decode_binary(blob)
try:
LOG.debug("Attempting to load yaml from string "
- "of length %s with allowed root types %s",
- len(blob), allowed)
+ "of length %s with allowed root types %s",
+ len(blob), allowed)
converted = safeyaml.load(blob)
if not isinstance(converted, allowed):
# Yes this will just be caught, but thats ok for now...
@@ -876,7 +878,7 @@ def read_conf_with_confd(cfgfile):
if not isinstance(confd, six.string_types):
raise TypeError(("Config file %s contains 'conf_d' "
"with non-string type %s") %
- (cfgfile, type_utils.obj_name(confd)))
+ (cfgfile, type_utils.obj_name(confd)))
else:
confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
@@ -1039,7 +1041,8 @@ def is_resolvable(name):
for iname in badnames:
try:
result = socket.getaddrinfo(iname, None, 0, 0,
- socket.SOCK_STREAM, socket.AI_CANONNAME)
+ socket.SOCK_STREAM,
+ socket.AI_CANONNAME)
badresults[iname] = []
for (_fam, _stype, _proto, cname, sockaddr) in result:
badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
@@ -1107,7 +1110,7 @@ def close_stdin():
def find_devs_with(criteria=None, oformat='device',
- tag=None, no_cache=False, path=None):
+ tag=None, no_cache=False, path=None):
"""
find devices matching given criteria (via blkid)
criteria can be *one* of:
@@ -1626,7 +1629,7 @@ def write_file(filename, content, mode=0o644, omode="wb"):
content = decode_binary(content)
write_type = 'characters'
LOG.debug("Writing to %s - %s: [%s] %s %s",
- filename, omode, mode, len(content), write_type)
+ filename, omode, mode, len(content), write_type)
with SeLinuxGuard(path=filename):
with open(filename, omode) as fh:
fh.write(content)
@@ -1749,7 +1752,7 @@ def is_container():
try:
# try to run a helper program. if it returns true/zero
# then we're inside a container. otherwise, no
- subp([helper])
+ subp(helper)
return True
except (IOError, OSError):
pass
@@ -2145,7 +2148,7 @@ def _read_dmi_syspath(key):
LOG.debug("dmi data %s returned %s", dmi_key_path, key_data)
return key_data.strip()
- except Exception as e:
+ except Exception:
logexc(LOG, "failed read of %s", dmi_key_path)
return None
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 74794ab0..795df19f 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -56,6 +56,7 @@ cloud_config_modules:
- fan
- landscape
- timezone
+ - lxd
- puppet
- chef
- salt-minion
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 3bde4aac..2651c027 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -51,12 +51,19 @@ datasource:
policy: on # [can be 'on', 'off' or 'force']
SmartOS:
+ # For KVM guests:
# Smart OS datasource works over a serial console interacting with
# a server on the other end. By default, the second serial console is the
# device. SmartOS also uses a serial timeout of 60 seconds.
serial_device: /dev/ttyS1
serial_timeout: 60
+ # For LX-Brand Zones guests:
+ # Smart OS datasource works over a socket interacting with
+ # the host on the other end. By default, the socket file is in
+ # the native .zonecontrol directory.
+ metadata_sockfile: /native/.zonecontrol/metadata.sock
+
# a list of keys that will not be base64 decoded even if base64_all
no_base64_decode: ['root_authorized_keys', 'motd_sys_info',
'iptables_disable']
diff --git a/doc/examples/cloud-config-lxd.txt b/doc/examples/cloud-config-lxd.txt
new file mode 100644
index 00000000..b9bb4aa5
--- /dev/null
+++ b/doc/examples/cloud-config-lxd.txt
@@ -0,0 +1,28 @@
+#cloud-config
+
+# configure lxd
+# default: none
+# all options default to none if not specified
+# lxd: config sections for lxd
+# init: dict of options for lxd init, see 'man lxd'
+# network_address: address for lxd to listen on
+# network_port: port for lxd to listen on
+# storage_backend: either 'zfs' or 'dir'
+# storage_create_device: device based storage using specified device
+# storage_create_loop: set up loop based storage with size in GB
+# storage_pool: name of storage pool to use or create
+# trust_password: password required to add new clients
+
+lxd:
+ init:
+ network_address: 0.0.0.0
+ network_port: 8443
+ storage_backend: zfs
+ storage_pool: datapool
+ storage_create_loop: 10
+
+
+# The simplest working configuration is
+# lxd:
+# init:
+# storage_backend: dir
diff --git a/doc/examples/cloud-config-mount-points.txt b/doc/examples/cloud-config-mount-points.txt
index 3b45b47f..aa676c24 100644
--- a/doc/examples/cloud-config-mount-points.txt
+++ b/doc/examples/cloud-config-mount-points.txt
@@ -42,5 +42,5 @@ mount_default_fields: [ None, None, "auto", "defaults,nobootwait", "0", "2" ]
# default is to not create any swap files, because 'size' is set to 0
swap:
filename: /swap.img
- size: "auto" or size in bytes
+ size: "auto" # or size in bytes
maxsize: size in bytes
diff --git a/doc/examples/cloud-config-rsyslog.txt b/doc/examples/cloud-config-rsyslog.txt
index d54960e8..28ea1f16 100644
--- a/doc/examples/cloud-config-rsyslog.txt
+++ b/doc/examples/cloud-config-rsyslog.txt
@@ -17,7 +17,7 @@ rsyslog:
- content: "*.* @@192.0.2.1:10514"
filename: 01-example.conf
- content: |
- *.* @@syslogd.example.com
+ *.* @@syslogd.example.com
config_dir: /etc/rsyslog.d
config_filename: 20-cloud-config.conf
service_reload_command: [your, syslog, reload, command]
@@ -32,7 +32,7 @@ rsyslog:
# - content: "*.* @@192.0.2.1:10514"
# filename: 01-example.conf
# - content: |
-# *.* @@syslogd.example.com
+# *.* @@syslogd.example.com
# rsyslog_filename: 20-cloud-config.conf
# rsyslog_dir: /etc/rsyslog.d
diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt
index 31491faf..0e8ed243 100644
--- a/doc/examples/cloud-config-user-groups.txt
+++ b/doc/examples/cloud-config-user-groups.txt
@@ -15,14 +15,14 @@ users:
selinux-user: staff_u
expiredate: 2012-09-01
ssh-import-id: foobar
- lock-passwd: false
+ lock_passwd: false
passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/
- name: barfoo
gecos: Bar B. Foo
sudo: ALL=(ALL) NOPASSWD:ALL
groups: users, admin
ssh-import-id: None
- lock-passwd: true
+ lock_passwd: true
ssh-authorized-keys:
- <ssh pub key 1>
- <ssh pub key 2>
@@ -42,7 +42,7 @@ users:
# selinux-user: Optional. The SELinux user for the user's login, such as
# "staff_u". When this is omitted the system will select the default
# SELinux user.
-# lock-passwd: Defaults to true. Lock the password to disable password login
+# lock_passwd: Defaults to true. Lock the password to disable password login
# inactive: Create the user as inactive
# passwd: The hash -- not the password itself -- of the password you want
# to use for this user. You can generate a safe hash via:
diff --git a/packages/bddeb b/packages/bddeb
index c4efe264..c141b1ab 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
+import glob
import os
import shutil
import sys
@@ -105,11 +106,11 @@ def write_debian_folder(root, version, revno, pkgmap,
util.abs_join(deb_dir, 'rules'),
params={'python': python, 'pyver': pyver})
- # Just copy the following directly
- for base_fn in ['dirs', 'copyright', 'compat']:
- shutil.copy(util.abs_join(find_root(),
- 'packages', 'debian', base_fn),
- util.abs_join(deb_dir, base_fn))
+ # Just copy any other files directly (including .in)
+ pdeb_d = util.abs_join(find_root(), 'packages', 'debian')
+ for f in [os.path.join(pdeb_d, f) for f in os.listdir(pdeb_d)]:
+ if os.path.isfile(f):
+ shutil.copy(f, util.abs_join(deb_dir, os.path.basename(f)))
def main():
diff --git a/packages/debian/cloud-init.postinst b/packages/debian/cloud-init.postinst
new file mode 100644
index 00000000..cdd0466d
--- /dev/null
+++ b/packages/debian/cloud-init.postinst
@@ -0,0 +1,16 @@
+#!/bin/sh
+cleanup_lp1552999() {
+ local oldver="$1" last_bad_ver="0.7.7~bzr1178"
+ dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0
+ local edir="/etc/systemd/system/multi-user.target.wants"
+ rm -f "$edir/cloud-config.service" "$edir/cloud-final.service" \
+ "$edir/cloud-init-local.service" "$edir/cloud-init.service"
+}
+
+
+#DEBHELPER#
+
+if [ "$1" = "configure" ]; then
+ oldver="$2"
+ cleanup_lp1552999 "$oldver"
+fi
diff --git a/packages/debian/cloud-init.preinst b/packages/debian/cloud-init.preinst
new file mode 100644
index 00000000..3c2af06d
--- /dev/null
+++ b/packages/debian/cloud-init.preinst
@@ -0,0 +1,20 @@
+#!/bin/sh
+# vi: ts=4 expandtab
+
+cleanup_lp1552999() {
+ local oldver="$1" last_bad_ver="0.7.7~bzr1178"
+ dpkg --compare-versions "$oldver" le "$last_bad_ver" || return 0
+ local hdir="/var/lib/systemd/deb-systemd-helper-enabled"
+ hdir="$hdir/multi-user.target.wants"
+ local edir="/etc/systemd/system/multi-user.target.wants"
+ rm -f "$hdir/cloud-config.service" "$hdir/cloud-final.service" \
+ "$hdir/cloud-init-local.service" "$hdir/cloud-init.service"
+}
+
+
+if [ "$1" = "upgrade" ]; then
+ oldver="$2"
+ cleanup_lp1552999 "$oldver"
+fi
+
+#DEBHELPER#
diff --git a/packages/debian/control.in b/packages/debian/control.in
index 5fe16e43..16713577 100644
--- a/packages/debian/control.in
+++ b/packages/debian/control.in
@@ -7,6 +7,7 @@ Build-Depends: debhelper (>= 9),
dh-python,
dh-systemd,
iproute2,
+ pep8,
pyflakes,
${python},
${test_requires},
@@ -18,8 +19,8 @@ Package: cloud-init
Architecture: all
Depends: procps,
${python},
- ${requires},
${misc:Depends},
+ ${${python}:Depends}
Recommends: eatmydata, sudo, software-properties-common, gdisk
XB-Python-Version: ${python:Versions}
Description: Init scripts for cloud instances
diff --git a/packages/debian/rules.in b/packages/debian/rules.in
index bb2e1d5c..cf2dd405 100755
--- a/packages/debian/rules.in
+++ b/packages/debian/rules.in
@@ -1,9 +1,8 @@
## template:basic
#!/usr/bin/make -f
-
INIT_SYSTEM ?= upstart,systemd
-PYVER ?= python${pyver}
export PYBUILD_INSTALL_ARGS=--init-system=$(INIT_SYSTEM)
+PYVER ?= python${pyver}
%:
dh $@ --with $(PYVER),systemd --buildsystem pybuild
@@ -14,6 +13,11 @@ override_dh_install:
cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf
override_dh_auto_test:
- # Because setup tools didn't copy data...
- [ ! -d .pybuild/pythonX.Y_?.?/build/tests ] || cp -r tests/data .pybuild/pythonX.Y_?.?/build/tests
- http_proxy= dh_auto_test -- --test-nose
+ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
+ http_proxy= make PYVER=${pyver} check
+else
+ @echo check disabled by DEB_BUILD_OPTIONS=$(DEB_BUILD_OPTIONS)
+endif
+
+override_dh_systemd_start:
+ dh_systemd_start --no-restart-on-upgrade --no-start
diff --git a/setup.py b/setup.py
index 5f61681b..0b261dfe 100755
--- a/setup.py
+++ b/setup.py
@@ -45,39 +45,50 @@ def tiny_p(cmd, capture=True):
stdout = None
stderr = None
sp = subprocess.Popen(cmd, stdout=stdout,
- stderr=stderr, stdin=None,
- universal_newlines=True)
+ stderr=stderr, stdin=None,
+ universal_newlines=True)
(out, err) = sp.communicate()
ret = sp.returncode
if ret not in [0]:
- raise RuntimeError("Failed running %s [rc=%s] (%s, %s)"
- % (cmd, ret, out, err))
+ raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" %
+ (cmd, ret, out, err))
return (out, err)
-def systemd_unitdir():
- cmd = ['pkg-config', '--variable=systemdsystemunitdir', 'systemd']
+def pkg_config_read(library, var):
+ fallbacks = {
+ 'systemd': {
+ 'systemdsystemunitdir': '/lib/systemd/system',
+ 'systemdsystemgeneratordir': '/lib/systemd/system-generators',
+ }
+ }
+ cmd = ['pkg-config', '--variable=%s' % var, library]
try:
(path, err) = tiny_p(cmd)
except:
- return '/lib/systemd/system'
+ return fallbacks[library][var]
return str(path).strip()
+
INITSYS_FILES = {
'sysvinit': [f for f in glob('sysvinit/redhat/*') if is_f(f)],
'sysvinit_freebsd': [f for f in glob('sysvinit/freebsd/*') if is_f(f)],
'sysvinit_deb': [f for f in glob('sysvinit/debian/*') if is_f(f)],
- 'systemd': [f for f in glob('systemd/*') if is_f(f)],
+ 'systemd': [f for f in (glob('systemd/*.service') +
+ glob('systemd/*.target')) if is_f(f)],
+ 'systemd.generators': [f for f in glob('systemd/*-generator') if is_f(f)],
'upstart': [f for f in glob('upstart/*') if is_f(f)],
}
INITSYS_ROOTS = {
'sysvinit': '/etc/rc.d/init.d',
'sysvinit_freebsd': '/usr/local/etc/rc.d',
'sysvinit_deb': '/etc/init.d',
- 'systemd': systemd_unitdir(),
+ 'systemd': pkg_config_read('systemd', 'systemdsystemunitdir'),
+ 'systemd.generators': pkg_config_read('systemd',
+ 'systemdsystemgeneratordir'),
'upstart': '/etc/init/',
}
-INITSYS_TYPES = sorted(list(INITSYS_ROOTS.keys()))
+INITSYS_TYPES = sorted([f.partition(".")[0] for f in INITSYS_ROOTS.keys()])
# Install everything in the right location and take care of Linux (default) and
# FreeBSD systems.
@@ -122,9 +133,8 @@ class InitsysInstallData(install):
user_options = install.user_options + [
# This will magically show up in member variable 'init_sys'
('init-system=', None,
- ('init system(s) to configure (%s) [default: None]') %
- (", ".join(INITSYS_TYPES))
- ),
+ ('init system(s) to configure (%s) [default: None]' %
+ (", ".join(INITSYS_TYPES)))),
]
def initialize_options(self):
@@ -138,7 +148,8 @@ class InitsysInstallData(install):
self.init_system = self.init_system.split(",")
if len(self.init_system) == 0:
- raise DistutilsArgError(("You must specify one of (%s) when"
+ raise DistutilsArgError(
+ ("You must specify one of (%s) when"
" specifying init system(s)!") % (", ".join(INITSYS_TYPES)))
bad = [f for f in self.init_system if f not in INITSYS_TYPES]
@@ -147,8 +158,12 @@ class InitsysInstallData(install):
"Invalid --init-system: %s" % (','.join(bad)))
for system in self.init_system:
- self.distribution.data_files.append(
- (INITSYS_ROOTS[system], INITSYS_FILES[system]))
+ # add data files for anything that starts with '<system>.'
+ datakeys = [k for k in INITSYS_ROOTS
+ if k.partition(".")[0] == system]
+ for k in datakeys:
+ self.distribution.data_files.append(
+ (INITSYS_ROOTS[k], INITSYS_FILES[k]))
# Force that command to reinitalize (with new file list)
self.distribution.reinitialize_command('install_data', True)
@@ -182,18 +197,18 @@ if sys.version_info < (3,):
requirements.append('cheetah')
-setuptools.setup(name='cloud-init',
- version=get_version(),
- description='EC2 initialisation magic',
- author='Scott Moser',
- author_email='scott.moser@canonical.com',
- url='http://launchpad.net/cloud-init/',
- packages=setuptools.find_packages(exclude=['tests']),
- scripts=['bin/cloud-init',
- 'tools/cloud-init-per',
- ],
- license='GPLv3',
- data_files=data_files,
- install_requires=requirements,
- cmdclass=cmdclass,
- )
+setuptools.setup(
+ name='cloud-init',
+ version=get_version(),
+ description='EC2 initialisation magic',
+ author='Scott Moser',
+ author_email='scott.moser@canonical.com',
+ url='http://launchpad.net/cloud-init/',
+ packages=setuptools.find_packages(exclude=['tests']),
+ scripts=['bin/cloud-init',
+ 'tools/cloud-init-per'],
+ license='GPLv3',
+ data_files=data_files,
+ install_requires=requirements,
+ cmdclass=cmdclass,
+ )
diff --git a/systemd/cloud-config.service b/systemd/cloud-config.service
index f9f1996e..45d2a63b 100644
--- a/systemd/cloud-config.service
+++ b/systemd/cloud-config.service
@@ -13,4 +13,4 @@ TimeoutSec=0
StandardOutput=journal+console
[Install]
-WantedBy=multi-user.target
+WantedBy=cloud-init.target
diff --git a/systemd/cloud-final.service b/systemd/cloud-final.service
index c023ad94..bfb08d4a 100644
--- a/systemd/cloud-final.service
+++ b/systemd/cloud-final.service
@@ -8,9 +8,10 @@ Type=oneshot
ExecStart=/usr/bin/cloud-init modules --mode=final
RemainAfterExit=yes
TimeoutSec=0
+KillMode=process
# Output needs to appear in instance console output
StandardOutput=journal+console
[Install]
-WantedBy=multi-user.target
+WantedBy=cloud-init.target
diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator
new file mode 100755
index 00000000..9d1e22f0
--- /dev/null
+++ b/systemd/cloud-init-generator
@@ -0,0 +1,128 @@
+#!/bin/sh
+set -f
+
+LOG=""
+DEBUG_LEVEL=1
+LOG_D="/run"
+ENABLE="enabled"
+DISABLE="disabled"
+CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target"
+CLOUD_TARGET_NAME="cloud-init.target"
+# lxc sets 'container', but lets make that explicitly a global
+CONTAINER="${container}"
+
+debug() {
+ local lvl="$1"
+ shift
+ [ "$lvl" -gt "$DEBUG_LEVEL" ] && return
+ if [ -z "$LOG" ]; then
+ local log="$LOG_D/${0##*/}.log"
+ { : > "$log"; } >/dev/null 2>&1 && LOG="$log" ||
+ LOG="/dev/kmsg"
+ fi
+ echo "$@" >> "$LOG"
+}
+
+etc_file() {
+ local pprefix="${1:-/etc/cloud/cloud-init.}"
+ _RET="unset"
+ [ -f "${pprefix}$ENABLE" ] && _RET="$ENABLE" && return 0
+ [ -f "${pprefix}$DISABLE" ] && _RET="$DISABLE" && return 0
+ return 0
+}
+
+read_proc_cmdline() {
+ if [ "$CONTAINER" = "lxc" ]; then
+ _RET_MSG="ignored: \$container=$CONTAINER"
+ _RET=""
+ return 0
+ fi
+
+ if systemd-detect-virt --container --quiet; then
+ _RET_MSG="ignored: detect-virt is container"
+ _RET=""
+ return 0
+ fi
+
+ _RET_MSG="/proc/cmdline"
+ read _RET < /proc/cmdline
+}
+
+kernel_cmdline() {
+ local cmdline="" tok=""
+ if [ -n "${KERNEL_CMDLINE+x}" ]; then
+ # use KERNEL_CMDLINE if present in environment even if empty
+ cmdline=${KERNEL_CMDLINE}
+ debug 1 "kernel command line from env KERNEL_CMDLINE: $cmdline"
+ elif read_proc_cmdline; then
+ read_proc_cmdline && cmdline="$_RET"
+ debug 1 "kernel command line ($_RET_MSG): $cmdline"
+ fi
+ _RET="unset"
+ cmdline=" $cmdline "
+ tok=${cmdline##* cloud-init=}
+ [ "$tok" = "$cmdline" ] && _RET="unset"
+ tok=${tok%% *}
+ [ "$tok" = "$ENABLE" -o "$tok" = "$DISABLE" ] && _RET="$tok"
+ return 0
+}
+
+default() {
+ _RET="$ENABLE"
+}
+
+main() {
+ local normal_d="$1" early_d="$2" late_d="$3"
+ local target_name="multi-user.target" gen_d="$early_d"
+ local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}"
+
+ debug 1 "$0 normal=$normal_d early=$early_d late=$late_d"
+ debug 2 "$0 $*"
+
+ local search result="error" ret=""
+ for search in kernel_cmdline etc_file default; do
+ if $search; then
+ debug 1 "$search found $_RET"
+ [ "$_RET" = "$ENABLE" -o "$_RET" = "$DISABLE" ] &&
+ result=$_RET && break
+ else
+ ret=$?
+ debug 0 "search $search returned $ret"
+ fi
+ done
+
+ if [ "$result" = "$ENABLE" ]; then
+ if [ -e "$link_path" ]; then
+ debug 1 "already enabled: no change needed"
+ else
+ [ -d "${link_path%/*}" ] || mkdir -p "${link_path%/*}" ||
+ debug 0 "failed to make dir $link_path"
+ if ln -snf "$CLOUD_SYSTEM_TARGET" "$link_path"; then
+ debug 1 "enabled via $link_path -> $CLOUD_SYSTEM_TARGET"
+ else
+ ret=$?
+ debug 0 "[$ret] enable failed:" \
+ "ln $CLOUD_SYSTEM_TARGET $link_path"
+ fi
+ fi
+ elif [ "$result" = "$DISABLE" ]; then
+ if [ -f "$link_path" ]; then
+ if rm -f "$link_path"; then
+ debug 1 "disabled. removed existing $link_path"
+ else
+ ret=$?
+ debug 0 "[$ret] disable failed, remove $link_path"
+ fi
+ else
+ debug 1 "already disabled: no change needed [no $link_path]"
+ fi
+ else
+ debug 0 "unexpected result '$result'"
+ ret=3
+ fi
+ return $ret
+}
+
+main "$@"
+
+# vi: ts=4 expandtab
diff --git a/systemd/cloud-init-local.service b/systemd/cloud-init-local.service
index a31985c6..73aa46f6 100644
--- a/systemd/cloud-init-local.service
+++ b/systemd/cloud-init-local.service
@@ -13,4 +13,4 @@ TimeoutSec=0
StandardOutput=journal+console
[Install]
-WantedBy=multi-user.target
+WantedBy=cloud-init.target
diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service
index 48920283..1f656f7f 100644
--- a/systemd/cloud-init.service
+++ b/systemd/cloud-init.service
@@ -15,4 +15,4 @@ TimeoutSec=0
StandardOutput=journal+console
[Install]
-WantedBy=multi-user.target
+WantedBy=cloud-init.target
diff --git a/systemd/cloud-init.target b/systemd/cloud-init.target
new file mode 100644
index 00000000..a63babb0
--- /dev/null
+++ b/systemd/cloud-init.target
@@ -0,0 +1,6 @@
+# cloud-init target is enabled by cloud-init-generator
+# To disable it you can either:
+# a.) boot with kernel cmdline of 'cloud-init=disabled'
+# b.) touch a file /etc/cloud/cloud-init.disabled
+[Unit]
+Description=Cloud-init target
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index c603bfdb..9c1ec1d4 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -27,11 +27,12 @@ from cloudinit import stages
from cloudinit import user_data as ud
from cloudinit import util
-INSTANCE_ID = "i-testing"
-
from . import helpers
+INSTANCE_ID = "i-testing"
+
+
class FakeDataSource(sources.DataSource):
def __init__(self, userdata=None, vendordata=None):
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index e9cd2fa5..85759c68 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -134,8 +134,7 @@ class TestGetCloudType(TestCase):
'''
util.read_dmi_data = _dmi_data('RHEV')
dsrc = DataSourceAltCloud({}, None, self.paths)
- self.assertEquals('RHEV', \
- dsrc.get_cloud_type())
+ self.assertEquals('RHEV', dsrc.get_cloud_type())
def test_vsphere(self):
'''
@@ -144,8 +143,7 @@ class TestGetCloudType(TestCase):
'''
util.read_dmi_data = _dmi_data('VMware Virtual Platform')
dsrc = DataSourceAltCloud({}, None, self.paths)
- self.assertEquals('VSPHERE', \
- dsrc.get_cloud_type())
+ self.assertEquals('VSPHERE', dsrc.get_cloud_type())
def test_unknown(self):
'''
@@ -154,8 +152,7 @@ class TestGetCloudType(TestCase):
'''
util.read_dmi_data = _dmi_data('Unrecognized Platform')
dsrc = DataSourceAltCloud({}, None, self.paths)
- self.assertEquals('UNKNOWN', \
- dsrc.get_cloud_type())
+ self.assertEquals('UNKNOWN', dsrc.get_cloud_type())
class TestGetDataCloudInfoFile(TestCase):
@@ -412,27 +409,27 @@ class TestReadUserDataCallback(TestCase):
'''Test read_user_data_callback() with both files.'''
self.assertEquals('test user data',
- read_user_data_callback(self.mount_dir))
+ read_user_data_callback(self.mount_dir))
def test_callback_dc(self):
'''Test read_user_data_callback() with only DC file.'''
_remove_user_data_files(self.mount_dir,
- dc_file=False,
- non_dc_file=True)
+ dc_file=False,
+ non_dc_file=True)
self.assertEquals('test user data',
- read_user_data_callback(self.mount_dir))
+ read_user_data_callback(self.mount_dir))
def test_callback_non_dc(self):
'''Test read_user_data_callback() with only non-DC file.'''
_remove_user_data_files(self.mount_dir,
- dc_file=True,
- non_dc_file=False)
+ dc_file=True,
+ non_dc_file=False)
self.assertEquals('test user data',
- read_user_data_callback(self.mount_dir))
+ read_user_data_callback(self.mount_dir))
def test_callback_none(self):
'''Test read_user_data_callback() no files are found.'''
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 3933794f..4c9c7d8b 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -207,7 +207,7 @@ class TestAzureDataSource(TestCase):
yaml_cfg = "{agent_command: my_command}\n"
cfg = yaml.safe_load(yaml_cfg)
odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
+ 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
@@ -219,8 +219,8 @@ class TestAzureDataSource(TestCase):
# set dscfg in via base64 encoded yaml
cfg = {'agent_command': "my_command"}
odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(cfg)),
- 'encoding': 'base64'}}
+ 'dscfg': {'text': b64e(yaml.dump(cfg)),
+ 'encoding': 'base64'}}
data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
dsrc = self._get_ds(data)
@@ -267,7 +267,8 @@ class TestAzureDataSource(TestCase):
# should equal that after the '$'
pos = defuser['passwd'].rfind("$") + 1
self.assertEqual(defuser['passwd'],
- crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos]))
+ crypt.crypt(odata['UserPassword'],
+ defuser['passwd'][0:pos]))
def test_userdata_plain(self):
mydata = "FOOBAR"
@@ -364,8 +365,8 @@ class TestAzureDataSource(TestCase):
# Make sure that user can affect disk aliases
dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
odata = {'HostName': "myhost", 'UserName': "myuser",
- 'dscfg': {'text': b64e(yaml.dump(dscfg)),
- 'encoding': 'base64'}}
+ 'dscfg': {'text': b64e(yaml.dump(dscfg)),
+ 'encoding': 'base64'}}
usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
'ephemeral0': False}}
userdata = '#cloud-config' + yaml.dump(usercfg) + "\n"
@@ -634,7 +635,7 @@ class TestReadAzureOvf(TestCase):
def test_invalid_xml_raises_non_azure_ds(self):
invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
self.assertRaises(DataSourceAzure.BrokenAzureDataSource,
- DataSourceAzure.read_azure_ovf, invalid_xml)
+ DataSourceAzure.read_azure_ovf, invalid_xml)
def test_load_with_pubkeys(self):
mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 0638c974..1134199b 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -1,6 +1,4 @@
import os
-import struct
-import unittest
from cloudinit.sources.helpers import azure as azure_helper
from ..helpers import TestCase
@@ -75,48 +73,64 @@ class TestFindEndpoint(TestCase):
self.assertRaises(Exception,
azure_helper.WALinuxAgentShim.find_endpoint)
- def _build_lease_content(self, ip_address, use_hex=True):
- ip_address_repr = ':'.join(
- [hex(int(part)).replace('0x', '')
- for part in ip_address.split('.')])
- if not use_hex:
- ip_address_repr = struct.pack(
- '>L', int(ip_address_repr.replace(':', ''), 16))
- ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8'))
+ @staticmethod
+ def _build_lease_content(encoded_address):
return '\n'.join([
'lease {',
' interface "eth0";',
- ' option unknown-245 {0};'.format(ip_address_repr),
+ ' option unknown-245 {0};'.format(encoded_address),
'}'])
- def test_hex_string(self):
- ip_address = '98.76.54.32'
- file_content = self._build_lease_content(ip_address)
+ def test_latest_lease_used(self):
+ encoded_addresses = ['5:4:3:2', '4:3:2:1']
+ file_content = '\n'.join([self._build_lease_content(encoded_address)
+ for encoded_address in encoded_addresses])
self.load_file.return_value = file_content
- self.assertEqual(ip_address,
+ self.assertEqual(encoded_addresses[-1].replace(':', '.'),
azure_helper.WALinuxAgentShim.find_endpoint())
+
+class TestExtractIpAddressFromLeaseValue(TestCase):
+
+ def test_hex_string(self):
+ ip_address, encoded_address = '98.76.54.32', '62:4c:36:20'
+ self.assertEqual(
+ ip_address,
+ azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
+ encoded_address
+ ))
+
def test_hex_string_with_single_character_part(self):
- ip_address = '4.3.2.1'
- file_content = self._build_lease_content(ip_address)
- self.load_file.return_value = file_content
- self.assertEqual(ip_address,
- azure_helper.WALinuxAgentShim.find_endpoint())
+ ip_address, encoded_address = '4.3.2.1', '4:3:2:1'
+ self.assertEqual(
+ ip_address,
+ azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
+ encoded_address
+ ))
def test_packed_string(self):
- ip_address = '98.76.54.32'
- file_content = self._build_lease_content(ip_address, use_hex=False)
- self.load_file.return_value = file_content
- self.assertEqual(ip_address,
- azure_helper.WALinuxAgentShim.find_endpoint())
+ ip_address, encoded_address = '98.76.54.32', 'bL6 '
+ self.assertEqual(
+ ip_address,
+ azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
+ encoded_address
+ ))
- def test_latest_lease_used(self):
- ip_addresses = ['4.3.2.1', '98.76.54.32']
- file_content = '\n'.join([self._build_lease_content(ip_address)
- for ip_address in ip_addresses])
- self.load_file.return_value = file_content
- self.assertEqual(ip_addresses[-1],
- azure_helper.WALinuxAgentShim.find_endpoint())
+ def test_packed_string_with_escaped_quote(self):
+ ip_address, encoded_address = '100.72.34.108', 'dH\\"l'
+ self.assertEqual(
+ ip_address,
+ azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
+ encoded_address
+ ))
+
+ def test_packed_string_containing_a_colon(self):
+ ip_address, encoded_address = '100.72.58.108', 'dH:l'
+ self.assertEqual(
+ ip_address,
+ azure_helper.WALinuxAgentShim.get_ip_from_lease_value(
+ encoded_address
+ ))
class TestGoalStateParsing(TestCase):
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 83aca505..bfd787d1 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -61,16 +61,16 @@ CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
CFG_DRIVE_FILES_V2 = {
- 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
- 'ec2/2009-04-04/user-data': USER_DATA,
- 'ec2/latest/meta-data.json': json.dumps(EC2_META),
- 'ec2/latest/user-data': USER_DATA,
- 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/2012-08-10/user_data': USER_DATA,
- 'openstack/content/0000': CONTENT_0,
- 'openstack/content/0001': CONTENT_1,
- 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
- 'openstack/latest/user_data': USER_DATA}
+ 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
+ 'ec2/2009-04-04/user-data': USER_DATA,
+ 'ec2/latest/meta-data.json': json.dumps(EC2_META),
+ 'ec2/latest/user-data': USER_DATA,
+ 'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
+ 'openstack/2012-08-10/user_data': USER_DATA,
+ 'openstack/content/0000': CONTENT_0,
+ 'openstack/content/0001': CONTENT_1,
+ 'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
+ 'openstack/latest/user_data': USER_DATA}
class TestConfigDriveDataSource(TestCase):
@@ -293,9 +293,8 @@ class TestConfigDriveDataSource(TestCase):
util.is_partition = my_is_partition
devs_with_answers = {"TYPE=vfat": [],
- "TYPE=iso9660": ["/dev/vdb"],
- "LABEL=config-2": ["/dev/vdb"],
- }
+ "TYPE=iso9660": ["/dev/vdb"],
+ "LABEL=config-2": ["/dev/vdb"]}
self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
# add a vfat item
@@ -306,9 +305,10 @@ class TestConfigDriveDataSource(TestCase):
# verify that partitions are considered, that have correct label.
devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
- "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
+ "TYPE=iso9660": [],
+ "LABEL=config-2": ["/dev/vdb3"]}
self.assertEqual(["/dev/vdb3"],
- ds.find_candidate_devs())
+ ds.find_candidate_devs())
finally:
util.find_devs_with = orig_find_devs_with
@@ -319,7 +319,7 @@ class TestConfigDriveDataSource(TestCase):
populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
myds = cfg_ds_from_dir(self.tmp)
self.assertEqual(myds.get_public_ssh_keys(),
- [OSTACK_META['public_keys']['mykey']])
+ [OSTACK_META['public_keys']['mykey']])
def cfg_ds_from_dir(seed_d):
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index eb97b692..77d15cac 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -25,9 +25,9 @@ class TestMAASDataSource(TestCase):
"""Verify a valid seeddir is read as such."""
data = {'instance-id': 'i-valid01',
- 'local-hostname': 'valid01-hostname',
- 'user-data': b'valid01-userdata',
- 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
+ 'local-hostname': 'valid01-hostname',
+ 'user-data': b'valid01-userdata',
+ 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
my_d = os.path.join(self.tmp, "valid")
populate_dir(my_d, data)
@@ -45,8 +45,8 @@ class TestMAASDataSource(TestCase):
"""Verify extra files do not affect seed_dir validity."""
data = {'instance-id': 'i-valid-extra',
- 'local-hostname': 'valid-extra-hostname',
- 'user-data': b'valid-extra-userdata', 'foo': 'bar'}
+ 'local-hostname': 'valid-extra-hostname',
+ 'user-data': b'valid-extra-userdata', 'foo': 'bar'}
my_d = os.path.join(self.tmp, "valid_extra")
populate_dir(my_d, data)
@@ -64,7 +64,7 @@ class TestMAASDataSource(TestCase):
"""Verify that invalid seed_dir raises MAASSeedDirMalformed."""
valid = {'instance-id': 'i-instanceid',
- 'local-hostname': 'test-hostname', 'user-data': ''}
+ 'local-hostname': 'test-hostname', 'user-data': ''}
my_based = os.path.join(self.tmp, "valid_extra")
@@ -94,8 +94,8 @@ class TestMAASDataSource(TestCase):
def test_seed_dir_missing(self):
"""Verify that missing seed_dir raises MAASSeedDirNone."""
self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
- DataSourceMAAS.read_maas_seed_dir,
- os.path.join(self.tmp, "nonexistantdirectory"))
+ DataSourceMAAS.read_maas_seed_dir,
+ os.path.join(self.tmp, "nonexistantdirectory"))
def test_seed_url_valid(self):
"""Verify that valid seed_url is read as such."""
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index 27adf21b..d796f030 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -20,7 +20,7 @@ TEST_VARS = {
'VAR7': 'single\\t',
'VAR8': 'double\\tword',
'VAR9': 'multi\\t\nline\n',
- 'VAR10': '\\', # expect \
+ 'VAR10': '\\', # expect '\'
'VAR11': '\'', # expect '
'VAR12': '$', # expect $
}
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index adee9019..5c49966a 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -56,12 +56,13 @@ MOCK_RETURNS = {
'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
'sdc:datacenter_name': 'somewhere2',
'sdc:operator-script': '\n'.join(['bin/true', '']),
+ 'sdc:uuid': str(uuid.uuid4()),
'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
'user-data': '\n'.join(['something', '']),
'user-script': '\n'.join(['/bin/true', '']),
}
-DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc')
+DMI_DATA_RETURN = 'smartdc'
def get_mock_client(mockdata):
@@ -111,7 +112,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
ret = apply_patches(patches)
self.unapply += ret
- def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None):
+ def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None,
+ is_lxbrand=False):
mod = DataSourceSmartOS
if mockdata is None:
@@ -124,9 +126,13 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
return dmi_data
def _os_uname():
- # LP: #1243287. tests assume this runs, but running test on
- # arm would cause them all to fail.
- return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64')
+ if not is_lxbrand:
+ # LP: #1243287. tests assume this runs, but running test on
+ # arm would cause them all to fail.
+ return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64')
+ else:
+ return ('LINUX', 'NODENAME', 'RELEASE', 'BRANDZ VIRTUAL LINUX',
+ 'X86_64')
if sys_cfg is None:
sys_cfg = {}
@@ -136,7 +142,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
sys_cfg['datasource']['SmartOS'] = ds_cfg
self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)])
- self.apply_patches([(mod, 'get_serial', mock.MagicMock())])
self.apply_patches([
(mod, 'JoyentMetadataClient', get_mock_client(mockdata))])
self.apply_patches([(mod, 'dmi_data', _dmi_data)])
@@ -144,6 +149,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
self.apply_patches([(mod, 'device_exists', lambda d: True)])
dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None,
paths=self.paths)
+ self.apply_patches([(dsrc, '_get_seed_file_object', mock.MagicMock())])
return dsrc
def test_seed(self):
@@ -151,14 +157,29 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
dsrc = self._get_ds()
ret = dsrc.get_data()
self.assertTrue(ret)
+ self.assertEquals('kvm', dsrc.smartos_type)
self.assertEquals('/dev/ttyS1', dsrc.seed)
+ def test_seed_lxbrand(self):
+ # default seed should be /dev/ttyS1
+ dsrc = self._get_ds(is_lxbrand=True)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals('lx-brand', dsrc.smartos_type)
+ self.assertEquals('/native/.zonecontrol/metadata.sock', dsrc.seed)
+
def test_issmartdc(self):
dsrc = self._get_ds()
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertTrue(dsrc.is_smartdc)
+ def test_issmartdc_lxbrand(self):
+ dsrc = self._get_ds(is_lxbrand=True)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertTrue(dsrc.is_smartdc)
+
def test_no_base64(self):
ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
dsrc = self._get_ds(ds_cfg=ds_cfg)
@@ -169,7 +190,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
- self.assertEquals(DMI_DATA_RETURN[0], dsrc.metadata['instance-id'])
+ self.assertEquals(MOCK_RETURNS['sdc:uuid'],
+ dsrc.metadata['instance-id'])
def test_root_keys(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
@@ -407,18 +429,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
self.assertEqual(dsrc.device_name_to_device('FOO'),
mydscfg['disk_aliases']['FOO'])
- @mock.patch('cloudinit.sources.DataSourceSmartOS.JoyentMetadataClient')
- @mock.patch('cloudinit.sources.DataSourceSmartOS.get_serial')
- def test_serial_console_closed_on_error(self, get_serial, metadata_client):
- class OurException(Exception):
- pass
- metadata_client.side_effect = OurException
- try:
- DataSourceSmartOS.query_data('noun', 'device', 0)
- except OurException:
- pass
- self.assertEqual(1, get_serial.return_value.close.call_count)
-
def apply_patches(patches):
ret = []
@@ -447,14 +457,25 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
}
def make_response():
- payload = ''
- if self.response_parts['payload']:
- payload = ' {0}'.format(self.response_parts['payload'])
- del self.response_parts['payload']
- return (
- 'V2 {length} {crc} {request_id} {command}{payload}\n'.format(
- payload=payload, **self.response_parts).encode('ascii'))
- self.serial.readline.side_effect = make_response
+ payloadstr = ''
+ if 'payload' in self.response_parts:
+ payloadstr = ' {0}'.format(self.response_parts['payload'])
+ return ('V2 {length} {crc} {request_id} '
+ '{command}{payloadstr}\n'.format(
+ payloadstr=payloadstr,
+ **self.response_parts).encode('ascii'))
+
+ self.metasource_data = None
+
+ def read_response(length):
+ if not self.metasource_data:
+ self.metasource_data = make_response()
+ self.metasource_data_len = len(self.metasource_data)
+ resp = self.metasource_data[:length]
+ self.metasource_data = self.metasource_data[length:]
+ return resp
+
+ self.serial.read.side_effect = read_response
self.patched_funcs.enter_context(
mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint',
mock.Mock(return_value=self.request_id)))
@@ -477,7 +498,9 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
client.get_metadata('some_key')
self.assertEqual(1, self.serial.write.call_count)
written_line = self.serial.write.call_args[0][0]
- self.assertEndsWith(written_line, b'\n')
+ print(type(written_line))
+ self.assertEndsWith(written_line.decode('ascii'),
+ b'\n'.decode('ascii'))
self.assertEqual(1, written_line.count(b'\n'))
def _get_written_line(self, key='some_key'):
@@ -489,7 +512,8 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
self.assertIsInstance(self._get_written_line(), six.binary_type)
def test_get_metadata_line_starts_with_v2(self):
- self.assertStartsWith(self._get_written_line(), b'V2')
+ foo = self._get_written_line()
+ self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii'))
def test_get_metadata_uses_get_command(self):
parts = self._get_written_line().decode('ascii').strip().split(' ')
@@ -526,7 +550,7 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
def test_get_metadata_reads_a_line(self):
client = self._get_client()
client.get_metadata('some_key')
- self.assertEqual(1, self.serial.readline.call_count)
+ self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
def test_get_metadata_returns_valid_value(self):
client = self._get_client()
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
index e4488e2a..4525f487 100644
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
@@ -6,13 +6,13 @@ from ..helpers import TestCase
bcfg = {
- 'name': 'bob',
- 'plain_text_passwd': 'ubuntu',
- 'home': "/home/ubuntu",
- 'shell': "/bin/bash",
- 'lock_passwd': True,
- 'gecos': "Ubuntu",
- 'groups': ["foo"]
+ 'name': 'bob',
+ 'plain_text_passwd': 'ubuntu',
+ 'home': "/home/ubuntu",
+ 'shell': "/bin/bash",
+ 'lock_passwd': True,
+ 'gecos': "Ubuntu",
+ 'groups': ["foo"]
}
@@ -34,16 +34,11 @@ class TestUGNormalize(TestCase):
def test_group_dict(self):
distro = self._make_distro('ubuntu')
g = {'groups': [
- {
- 'ubuntu': ['foo', 'bar'],
- 'bob': 'users',
- },
- 'cloud-users',
- {
- 'bob': 'users2',
- },
- ]
- }
+ {'ubuntu': ['foo', 'bar'],
+ 'bob': 'users'},
+ 'cloud-users',
+ {'bob': 'users2'}
+ ]}
(_users, groups) = self._norm(g, distro)
self.assertIn('ubuntu', groups)
ub_members = groups['ubuntu']
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
new file mode 100644
index 00000000..7ffa2a53
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -0,0 +1,75 @@
+from cloudinit.config import cc_lxd
+from cloudinit import (distros, helpers, cloud)
+from cloudinit.sources import DataSourceNoCloud
+from .. import helpers as t_help
+
+import logging
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+LOG = logging.getLogger(__name__)
+
+
+class TestLxd(t_help.TestCase):
+ lxd_cfg = {
+ 'lxd': {
+ 'init': {
+ 'network_address': '0.0.0.0',
+ 'storage_backend': 'zfs',
+ 'storage_pool': 'poolname',
+ }
+ }
+ }
+
+ def setUp(self):
+ super(TestLxd, self).setUp()
+
+ def _get_cloud(self, distro):
+ cls = distros.fetch(distro)
+ paths = helpers.Paths({})
+ d = cls(distro, {}, paths)
+ ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
+ cc = cloud.Cloud(ds, paths, {}, d, None)
+ return cc
+
+ @mock.patch("cloudinit.config.cc_lxd.util")
+ def test_lxd_init(self, mock_util):
+ cc = self._get_cloud('ubuntu')
+ mock_util.which.return_value = True
+ cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, LOG, [])
+ self.assertTrue(mock_util.which.called)
+ init_call = mock_util.subp.call_args_list[0][0][0]
+ self.assertEquals(init_call,
+ ['lxd', 'init', '--auto',
+ '--network-address=0.0.0.0',
+ '--storage-backend=zfs',
+ '--storage-pool=poolname'])
+
+ @mock.patch("cloudinit.config.cc_lxd.util")
+ def test_lxd_install(self, mock_util):
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ mock_util.which.return_value = None
+ cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, LOG, [])
+ self.assertTrue(cc.distro.install_packages.called)
+ install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
+ self.assertEquals(sorted(install_pkg), ['lxd', 'zfs'])
+
+ @mock.patch("cloudinit.config.cc_lxd.util")
+ def test_no_init_does_nothing(self, mock_util):
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, LOG, [])
+ self.assertFalse(cc.distro.install_packages.called)
+ self.assertFalse(mock_util.subp.called)
+
+ @mock.patch("cloudinit.config.cc_lxd.util")
+ def test_no_lxd_does_nothing(self, mock_util):
+ cc = self._get_cloud('ubuntu')
+ cc.distro = mock.MagicMock()
+ cc_lxd.handle('cc_lxd', {'package_update': True}, cc, LOG, [])
+ self.assertFalse(cc.distro.install_packages.called)
+ self.assertFalse(mock_util.subp.called)
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
index 5687b10d..04ce5687 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/test_handler/test_handler_power_state.py
@@ -74,7 +74,7 @@ class TestLoadPowerState(t_help.TestCase):
class TestCheckCondition(t_help.TestCase):
def cmd_with_exit(self, rc):
return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc])
-
+
def test_true_is_true(self):
self.assertEqual(psc.check_condition(True), True)
@@ -94,7 +94,6 @@ class TestCheckCondition(t_help.TestCase):
self.assertEqual(mocklog.warn.call_count, 1)
-
def check_lps_ret(psc_return, mode=None):
if len(psc_return) != 3:
raise TypeError("length returned = %d" % len(psc_return))
@@ -107,7 +106,7 @@ def check_lps_ret(psc_return, mode=None):
if 'shutdown' not in psc_return[0][0]:
errs.append("string 'shutdown' not in cmd")
- if 'condition' is None:
+ if condition is None:
errs.append("condition was not returned")
if mode is not None:
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index 0bcdcb31..34d11f21 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -190,7 +190,8 @@ class TestRandomSeed(t_help.TestCase):
c = self._get_cloud('ubuntu', {})
self.whichdata = {}
self.assertRaises(ValueError, cc_seed_random.handle,
- 'test', {'random_seed': {'command_required': True}}, c, LOG, [])
+ 'test', {'random_seed': {'command_required': True}},
+ c, LOG, [])
def test_seed_command_and_required(self):
c = self._get_cloud('ubuntu', {})
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index eceb14d9..8aeff53c 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -125,8 +125,7 @@ class TestInstallPackages(t_help.TestCase):
"pkg1.smoser.config": "pkg1.smoser.config-data",
"pkg1.config": "pkg1.config-data",
"pkg2.smoser_0.0_amd64.snap": "pkg2-snapdata",
- "pkg2.smoser_0.0_amd64.config": "pkg2.config",
- })
+ "pkg2.smoser_0.0_amd64.config": "pkg2.config"})
ret = get_package_ops(
packages=[], configs={}, installed=[], fspath=self.tmp)
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 3b317121..9aeb1cde 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -32,7 +32,8 @@ VALID_CONTENT = {
),
}
-TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
+TEST_OPTIONS = (
+ "no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
'command="echo \'Please login as the user \"ubuntu\" rather than the'
'user \"root\".\';echo;sleep 10"')
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index 0c19a2c2..b9863650 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -114,5 +114,6 @@ $a,$b'''
codename)
out_data = templater.basic_render(in_data,
- {'mirror': mirror, 'codename': codename})
+ {'mirror': mirror,
+ 'codename': codename})
self.assertEqual(ex_data, out_data)
diff --git a/tools/hacking.py b/tools/hacking.py
index 3175df38..1a0631c2 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -47,10 +47,10 @@ def import_normalize(line):
# handle "from x import y as z" to "import x.y as z"
split_line = line.split()
if (line.startswith("from ") and "," not in line and
- split_line[2] == "import" and split_line[3] != "*" and
- split_line[1] != "__future__" and
- (len(split_line) == 4 or
- (len(split_line) == 6 and split_line[4] == "as"))):
+ split_line[2] == "import" and split_line[3] != "*" and
+ split_line[1] != "__future__" and
+ (len(split_line) == 4 or
+ (len(split_line) == 6 and split_line[4] == "as"))):
return "import %s.%s" % (split_line[1], split_line[3])
else:
return line
@@ -74,7 +74,7 @@ def cloud_import_alphabetical(physical_line, line_number, lines):
split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
return (0, "N306: imports not in alphabetical order (%s, %s)"
- % (split_previous[1], split_line[1]))
+ % (split_previous[1], split_line[1]))
def cloud_docstring_start_space(physical_line):
@@ -87,8 +87,8 @@ def cloud_docstring_start_space(physical_line):
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) > pos + 1):
if (physical_line[pos + 3] == ' '):
- return (pos, "N401: one line docstring should not start with"
- " a space")
+ return (pos,
+ "N401: one line docstring should not start with a space")
def cloud_todo_format(physical_line):
@@ -167,4 +167,4 @@ if __name__ == "__main__":
finally:
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
- % len(_missingImport))
+ % len(_missingImport))
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index dfbc2a71..1c746f17 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -126,11 +126,11 @@ class WebException(Exception):
def yamlify(data):
formatted = yaml.dump(data,
- line_break="\n",
- indent=4,
- explicit_start=True,
- explicit_end=True,
- default_flow_style=False)
+ line_break="\n",
+ indent=4,
+ explicit_start=True,
+ explicit_end=True,
+ default_flow_style=False)
return formatted
@@ -282,7 +282,7 @@ class MetaDataHandler(object):
else:
log.warn(("Did not implement action %s, "
"returning empty response: %r"),
- action, NOT_IMPL_RESPONSE)
+ action, NOT_IMPL_RESPONSE)
return NOT_IMPL_RESPONSE
@@ -404,14 +404,17 @@ def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'):
def extract_opts():
parser = OptionParser()
parser.add_option("-p", "--port", dest="port", action="store", type=int,
- default=80, metavar="PORT",
- help="port from which to serve traffic (default: %default)")
+ default=80, metavar="PORT",
+ help=("port from which to serve traffic"
+ " (default: %default)"))
parser.add_option("-a", "--addr", dest="address", action="store", type=str,
- default='0.0.0.0', metavar="ADDRESS",
- help="address from which to serve traffic (default: %default)")
+ default='0.0.0.0', metavar="ADDRESS",
+ help=("address from which to serve traffic"
+ " (default: %default)"))
parser.add_option("-f", '--user-data-file', dest='user_data_file',
- action='store', metavar='FILE',
- help="user data filename to serve back to incoming requests")
+ action='store', metavar='FILE',
+ help=("user data filename to serve back to"
+ "incoming requests"))
(options, args) = parser.parse_args()
out = dict()
out['extra'] = args
diff --git a/tools/run-pep8 b/tools/run-pep8
index ccd6be5a..086400fc 100755
--- a/tools/run-pep8
+++ b/tools/run-pep8
@@ -1,39 +1,22 @@
#!/bin/bash
-if [ $# -eq 0 ]; then
- files=( bin/cloud-init $(find * -name "*.py" -type f) )
+pycheck_dirs=( "cloudinit/" "bin/" "tests/" "tools/" )
+# FIXME: cloud-init modifies sys module path, pep8 does not like
+# bin_files=( "bin/cloud-init" )
+CR="
+"
+[ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose=""
+
+set -f
+if [ $# -eq 0 ]; then unset IFS
+ IFS="$CR"
+ files=( "${bin_files[@]}" "${pycheck_dirs[@]}" )
+ unset IFS
else
- files=( "$@" );
+ files=( "$@" )
fi
-if [ -f 'hacking.py' ]
-then
- base=`pwd`
-else
- base=`pwd`/tools/
-fi
-
-IGNORE=""
-
-# King Arthur: Be quiet! ... Be Quiet! I Order You to Be Quiet.
-IGNORE="$IGNORE,E121" # Continuation line indentation is not a multiple of four
-IGNORE="$IGNORE,E123" # Closing bracket does not match indentation of opening bracket's line
-IGNORE="$IGNORE,E124" # Closing bracket missing visual indentation
-IGNORE="$IGNORE,E125" # Continuation line does not distinguish itself from next logical line
-IGNORE="$IGNORE,E126" # Continuation line over-indented for hanging indent
-IGNORE="$IGNORE,E127" # Continuation line over-indented for visual indent
-IGNORE="$IGNORE,E128" # Continuation line under-indented for visual indent
-IGNORE="$IGNORE,E502" # The backslash is redundant between brackets
-IGNORE="${IGNORE#,}" # remove the leading ',' added above
-
-cmd=(
- ${base}/hacking.py
-
- --ignore="$IGNORE"
-
- "${files[@]}"
-)
-
-echo -e "\nRunning 'cloudinit' pep8:"
-echo "${cmd[@]}"
-"${cmd[@]}"
+myname=${0##*/}
+cmd=( "${myname#run-}" $verbose "${files[@]}" )
+echo "Running: " "${cmd[@]}" 1>&2
+exec "${cmd[@]}"
diff --git a/tools/run-pyflakes b/tools/run-pyflakes
new file mode 100755
index 00000000..4bea17f4
--- /dev/null
+++ b/tools/run-pyflakes
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+PYTHON_VERSION=${PYTHON_VERSION:-2}
+CR="
+"
+pycheck_dirs=( "cloudinit/" "bin/" "tests/" "tools/" )
+
+set -f
+if [ $# -eq 0 ]; then
+ files=( "${pycheck_dirs[@]}" )
+else
+ files=( "$@" )
+fi
+
+cmd=( "python${PYTHON_VERSION}" -m "pyflakes" "${files[@]}" )
+
+echo "Running: " "${cmd[@]}" 1>&2
+exec "${cmd[@]}"
diff --git a/tools/run-pyflakes3 b/tools/run-pyflakes3
new file mode 100755
index 00000000..e9f0863d
--- /dev/null
+++ b/tools/run-pyflakes3
@@ -0,0 +1,2 @@
+#!/bin/sh
+PYTHON_VERSION=3 exec "${0%/*}/run-pyflakes" "$@"
diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py
index 6e164590..ed9037d9 100755
--- a/tools/validate-yaml.py
+++ b/tools/validate-yaml.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
"""Try to read a YAML file and report any errors.
"""
diff --git a/tox.ini b/tox.ini
index 3619edf4..bd7c27dd 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,16 +1,19 @@
[tox]
-envlist = py26,py27,py34
+envlist = py27,py3,pyflakes
recreate = True
[testenv]
commands = python -m nose {posargs:tests}
-deps =
- contextlib2
- httpretty>=0.7.1
- mock
- nose
- pep8==1.5.7
- pyflakes
+deps = -r{toxinidir}/test-requirements.txt
+ -r{toxinidir}/requirements.txt
+
+[testenv:py3]
+basepython = python3
+
+[testenv:pyflakes]
+basepython = python3
+commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
+ {envpython} -m pep8 {posargs:cloudinit/ tests/ tools/}
# https://github.com/gabrielfalcao/HTTPretty/issues/223
setenv =