author     Chad Smith <chad.smith@canonical.com>  2018-06-21 14:32:27 -0600
committer  Chad Smith <chad.smith@canonical.com>  2018-06-21 14:32:27 -0600
commit     ba53ceb5a8a30c10951ec3ac49b8d6ebbe09a524 (patch)
tree       5b68f0602daea648d48b3dc16809b0d66fb565d1 /cloudinit
parent     7d1e8976bba629f30da45e814a5a97e2f4b7de3d (diff)
parent     2d6e4219db73e80c135efd83753f9302f778f08d (diff)
merge from master at 18.3
Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/analyze/__main__.py | 2
-rw-r--r--  cloudinit/analyze/dump.py | 2
-rw-r--r--  cloudinit/apport.py | 27
-rw-r--r--  cloudinit/cmd/devel/logs.py | 59
-rw-r--r--  cloudinit/cmd/devel/tests/test_logs.py | 21
-rw-r--r--  cloudinit/cmd/main.py | 2
-rw-r--r--  cloudinit/cmd/tests/test_main.py | 6
-rw-r--r--  cloudinit/config/cc_apt_configure.py | 4
-rw-r--r--  cloudinit/config/cc_bootcmd.py | 1
-rw-r--r--  cloudinit/config/cc_disable_ec2_metadata.py | 14
-rw-r--r--  cloudinit/config/cc_disk_setup.py | 12
-rw-r--r--  cloudinit/config/cc_emit_upstart.py | 2
-rw-r--r--  cloudinit/config/cc_lxd.py | 64
-rw-r--r--  cloudinit/config/cc_mounts.py | 75
-rw-r--r--  cloudinit/config/cc_ntp.py | 485
-rw-r--r--  cloudinit/config/cc_phone_home.py | 7
-rw-r--r--  cloudinit/config/cc_power_state_change.py | 2
-rw-r--r--  cloudinit/config/cc_resizefs.py | 10
-rw-r--r--  cloudinit/config/cc_rh_subscription.py | 18
-rw-r--r--  cloudinit/config/cc_rsyslog.py | 4
-rw-r--r--  cloudinit/config/cc_runcmd.py | 1
-rwxr-xr-x  cloudinit/config/cc_set_passwords.py | 105
-rw-r--r--  cloudinit/config/cc_snap.py | 5
-rw-r--r--  cloudinit/config/cc_snappy.py | 4
-rw-r--r--  cloudinit/config/cc_ubuntu_advantage.py | 5
-rw-r--r--  cloudinit/config/cc_users_groups.py | 8
-rw-r--r--  cloudinit/config/schema.py | 68
-rw-r--r--  cloudinit/config/tests/test_disable_ec2_metadata.py | 50
-rw-r--r--  cloudinit/config/tests/test_set_passwords.py | 71
-rw-r--r--  cloudinit/config/tests/test_snap.py | 29
-rw-r--r--  cloudinit/config/tests/test_ubuntu_advantage.py | 30
-rwxr-xr-x  cloudinit/distros/__init__.py | 14
-rw-r--r--  cloudinit/distros/freebsd.py | 10
-rw-r--r--  cloudinit/distros/opensuse.py | 24
-rw-r--r--  cloudinit/distros/ubuntu.py | 19
-rw-r--r--  cloudinit/ec2_utils.py | 14
-rw-r--r--  cloudinit/handlers/upstart_job.py | 2
-rw-r--r--  cloudinit/net/__init__.py | 36
-rwxr-xr-x  cloudinit/net/cmdline.py | 2
-rw-r--r--  cloudinit/net/dhcp.py | 2
-rw-r--r--  cloudinit/net/eni.py | 20
-rw-r--r--  cloudinit/net/netplan.py | 22
-rw-r--r--  cloudinit/net/network_state.py | 11
-rw-r--r--  cloudinit/net/sysconfig.py | 10
-rw-r--r--  cloudinit/net/tests/test_init.py | 1
-rw-r--r--  cloudinit/netinfo.py | 379
-rw-r--r--  cloudinit/reporting/events.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAliYun.py | 2
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 21
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 117
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 31
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 15
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 48
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py | 106
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 4
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 4
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 2
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 2
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 182
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 196
-rw-r--r--  cloudinit/sources/__init__.py | 76
-rw-r--r--  cloudinit/sources/helpers/azure.py | 5
-rw-r--r--  cloudinit/sources/helpers/digitalocean.py | 7
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_passwd.py | 4
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 4
-rw-r--r--  cloudinit/sources/tests/test_init.py | 91
-rw-r--r--  cloudinit/ssh_util.py | 70
-rw-r--r--  cloudinit/stages.py | 26
-rw-r--r--  cloudinit/templater.py | 12
-rw-r--r--  cloudinit/tests/helpers.py | 86
-rw-r--r--  cloudinit/tests/test_netinfo.py | 233
-rw-r--r--  cloudinit/tests/test_url_helper.py | 28
-rw-r--r--  cloudinit/tests/test_util.py | 129
-rw-r--r--  cloudinit/tests/test_version.py | 31
-rw-r--r--  cloudinit/url_helper.py | 31
-rw-r--r--  cloudinit/user_data.py | 28
-rw-r--r--  cloudinit/util.py | 239
-rw-r--r--  cloudinit/version.py | 6
80 files changed, 2754 insertions(+), 847 deletions(-)
diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py
index 3ba5903f..f8613656 100644
--- a/cloudinit/analyze/__main__.py
+++ b/cloudinit/analyze/__main__.py
@@ -69,7 +69,7 @@ def analyze_blame(name, args):
"""
(infh, outfh) = configure_io(args)
blame_format = ' %ds (%n)'
- r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE)
+ r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
for idx, record in enumerate(show.show_events(_get_events(infh),
blame_format)):
srecs = sorted(filter(r.match, record), reverse=True)
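
The only change here is the ``r`` prefix: ``\s`` and ``\d`` are invalid escape
sequences in an ordinary Python string literal (a DeprecationWarning on newer
interpreters), while a raw string hands the backslashes through to the re
module unchanged. A minimal sketch of the pattern, separate from the commit:

    import re

    # Raw string: backslashes reach re.compile intact.
    r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE)
    print(r.match('  12.34 (module)').group(1))  # '  12.34'
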
diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py
index b071aa19..1f3060d0 100644
--- a/cloudinit/analyze/dump.py
+++ b/cloudinit/analyze/dump.py
@@ -112,7 +112,7 @@ def parse_ci_logline(line):
return None
event_description = stage_to_description[event_name]
else:
- (pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
+ (_pymodloglvl, event_type, event_name) = eventstr.split()[0:3]
event_description = eventstr.split(event_name)[1].strip()
event = {
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 618b0160..130ff269 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -13,10 +13,29 @@ except ImportError:
KNOWN_CLOUD_NAMES = [
- 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma',
- 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine',
- 'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF',
- 'Scaleway', 'SmartOS', 'VMware', 'Other']
+ 'AliYun',
+ 'AltCloud',
+ 'Amazon - Ec2',
+ 'Azure',
+ 'Bigstep',
+ 'Brightbox',
+ 'CloudSigma',
+ 'CloudStack',
+ 'DigitalOcean',
+ 'GCE - Google Compute Engine',
+ 'Hetzner Cloud',
+ 'IBM - (aka SoftLayer or BlueMix)',
+ 'LXD',
+ 'MAAS',
+ 'NoCloud',
+ 'OpenNebula',
+ 'OpenStack',
+ 'OVF',
+ 'OpenTelekomCloud',
+ 'Scaleway',
+ 'SmartOS',
+ 'VMware',
+ 'Other']
# Potentially clear text collected logs
CLOUDINIT_LOG = '/var/log/cloud-init.log'
diff --git a/cloudinit/cmd/devel/logs.py b/cloudinit/cmd/devel/logs.py
index 35ca478f..df725204 100644
--- a/cloudinit/cmd/devel/logs.py
+++ b/cloudinit/cmd/devel/logs.py
@@ -11,6 +11,7 @@ from cloudinit.temp_utils import tempdir
from datetime import datetime
import os
import shutil
+import sys
CLOUDINIT_LOGS = ['/var/log/cloud-init.log', '/var/log/cloud-init-output.log']
@@ -31,6 +32,8 @@ def get_parser(parser=None):
parser = argparse.ArgumentParser(
prog='collect-logs',
description='Collect and tar all cloud-init debug info')
+ parser.add_argument('--verbose', '-v', action='count', default=0,
+ dest='verbosity', help="Be more verbose.")
parser.add_argument(
"--tarfile", '-t', default='cloud-init.tar.gz',
help=('The tarfile to create containing all collected logs.'
@@ -43,17 +46,33 @@ def get_parser(parser=None):
return parser
-def _write_command_output_to_file(cmd, filename):
+def _write_command_output_to_file(cmd, filename, msg, verbosity):
"""Helper which runs a command and writes output or error to filename."""
try:
out, _ = subp(cmd)
except ProcessExecutionError as e:
write_file(filename, str(e))
+ _debug("collecting %s failed.\n" % msg, 1, verbosity)
else:
write_file(filename, out)
+ _debug("collected %s\n" % msg, 1, verbosity)
+ return out
-def collect_logs(tarfile, include_userdata):
+def _debug(msg, level, verbosity):
+ if level <= verbosity:
+ sys.stderr.write(msg)
+
+
+def _collect_file(path, out_dir, verbosity):
+ if os.path.isfile(path):
+ copy(path, out_dir)
+ _debug("collected file: %s\n" % path, 1, verbosity)
+ else:
+ _debug("file %s did not exist\n" % path, 2, verbosity)
+
+
+def collect_logs(tarfile, include_userdata, verbosity=0):
"""Collect all cloud-init logs and tar them up into the provided tarfile.
@param tarfile: The path of the tar-gzipped file to create.
@@ -64,28 +83,46 @@ def collect_logs(tarfile, include_userdata):
log_dir = 'cloud-init-logs-{0}'.format(date)
with tempdir(dir='/tmp') as tmp_dir:
log_dir = os.path.join(tmp_dir, log_dir)
- _write_command_output_to_file(
+ version = _write_command_output_to_file(
+ ['cloud-init', '--version'],
+ os.path.join(log_dir, 'version'),
+ "cloud-init --version", verbosity)
+ dpkg_ver = _write_command_output_to_file(
['dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'],
- os.path.join(log_dir, 'version'))
+ os.path.join(log_dir, 'dpkg-version'),
+ "dpkg version", verbosity)
+ if not version:
+ version = dpkg_ver if dpkg_ver else "not-available"
+ _debug("collected cloud-init version: %s\n" % version, 1, verbosity)
_write_command_output_to_file(
- ['dmesg'], os.path.join(log_dir, 'dmesg.txt'))
+ ['dmesg'], os.path.join(log_dir, 'dmesg.txt'),
+ "dmesg output", verbosity)
_write_command_output_to_file(
- ['journalctl', '-o', 'short-precise'],
- os.path.join(log_dir, 'journal.txt'))
+ ['journalctl', '--boot=0', '-o', 'short-precise'],
+ os.path.join(log_dir, 'journal.txt'),
+ "systemd journal of current boot", verbosity)
+
for log in CLOUDINIT_LOGS:
- copy(log, log_dir)
+ _collect_file(log, log_dir, verbosity)
if include_userdata:
- copy(USER_DATA_FILE, log_dir)
+ _collect_file(USER_DATA_FILE, log_dir, verbosity)
run_dir = os.path.join(log_dir, 'run')
ensure_dir(run_dir)
- shutil.copytree(CLOUDINIT_RUN_DIR, os.path.join(run_dir, 'cloud-init'))
+ if os.path.exists(CLOUDINIT_RUN_DIR):
+ shutil.copytree(CLOUDINIT_RUN_DIR,
+ os.path.join(run_dir, 'cloud-init'))
+ _debug("collected dir %s\n" % CLOUDINIT_RUN_DIR, 1, verbosity)
+ else:
+ _debug("directory '%s' did not exist\n" % CLOUDINIT_RUN_DIR, 1,
+ verbosity)
with chdir(tmp_dir):
subp(['tar', 'czvf', tarfile, log_dir.replace(tmp_dir + '/', '')])
+ sys.stderr.write("Wrote %s\n" % tarfile)
def handle_collect_logs_args(name, args):
"""Handle calls to 'cloud-init collect-logs' as a subcommand."""
- collect_logs(args.tarfile, args.userdata)
+ collect_logs(args.tarfile, args.userdata, args.verbosity)
def main():
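
The new ``--verbose`` plumbing is plain level gating: each ``-v`` bumps
``verbosity`` by one (``action='count'``), and ``_debug(msg, level, verbosity)``
writes to stderr only when ``level <= verbosity``. A small sketch of the
behavior, using the same helper as above:

    import sys

    def _debug(msg, level, verbosity):
        if level <= verbosity:
            sys.stderr.write(msg)

    # With a single -v (verbosity=1): level-1 messages print,
    # level-2 messages (e.g. "file ... did not exist") stay quiet.
    _debug("collected file: /var/log/cloud-init.log\n", 1, 1)  # printed
    _debug("file /tmp/nope did not exist\n", 2, 1)             # suppressed
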
diff --git a/cloudinit/cmd/devel/tests/test_logs.py b/cloudinit/cmd/devel/tests/test_logs.py
index dc4947cc..98b47560 100644
--- a/cloudinit/cmd/devel/tests/test_logs.py
+++ b/cloudinit/cmd/devel/tests/test_logs.py
@@ -4,6 +4,7 @@ from cloudinit.cmd.devel import logs
from cloudinit.util import ensure_dir, load_file, subp, write_file
from cloudinit.tests.helpers import FilesystemMockingTestCase, wrap_and_call
from datetime import datetime
+import mock
import os
@@ -27,11 +28,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
date = datetime.utcnow().date().strftime('%Y-%m-%d')
date_logdir = 'cloud-init-logs-{0}'.format(date)
+ version_out = '/usr/bin/cloud-init 18.2fake\n'
expected_subp = {
('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
'0.7fake\n',
+ ('cloud-init', '--version'): version_out,
('dmesg',): 'dmesg-out\n',
- ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+ ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
('tar', 'czvf', output_tarfile, date_logdir): ''
}
@@ -44,9 +47,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
subp(cmd) # Pass through tar cmd so we can check output
return expected_subp[cmd_tuple], ''
+ fake_stderr = mock.MagicMock()
+
wrap_and_call(
'cloudinit.cmd.devel.logs',
{'subp': {'side_effect': fake_subp},
+ 'sys.stderr': {'new': fake_stderr},
'CLOUDINIT_LOGS': {'new': [log1, log2]},
'CLOUDINIT_RUN_DIR': {'new': self.run_dir}},
logs.collect_logs, output_tarfile, include_userdata=False)
@@ -55,7 +61,9 @@ class TestCollectLogs(FilesystemMockingTestCase):
out_logdir = self.tmp_path(date_logdir, self.new_root)
self.assertEqual(
'0.7fake\n',
- load_file(os.path.join(out_logdir, 'version')))
+ load_file(os.path.join(out_logdir, 'dpkg-version')))
+ self.assertEqual(version_out,
+ load_file(os.path.join(out_logdir, 'version')))
self.assertEqual(
'cloud-init-log',
load_file(os.path.join(out_logdir, 'cloud-init.log')))
@@ -72,6 +80,7 @@ class TestCollectLogs(FilesystemMockingTestCase):
'results',
load_file(
os.path.join(out_logdir, 'run', 'cloud-init', 'results.json')))
+ fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
def test_collect_logs_includes_optional_userdata(self):
"""collect-logs include userdata when --include-userdata is set."""
@@ -88,11 +97,13 @@ class TestCollectLogs(FilesystemMockingTestCase):
date = datetime.utcnow().date().strftime('%Y-%m-%d')
date_logdir = 'cloud-init-logs-{0}'.format(date)
+ version_out = '/usr/bin/cloud-init 18.2fake\n'
expected_subp = {
('dpkg-query', '--show', "-f=${Version}\n", 'cloud-init'):
'0.7fake',
+ ('cloud-init', '--version'): version_out,
('dmesg',): 'dmesg-out\n',
- ('journalctl', '-o', 'short-precise'): 'journal-out\n',
+ ('journalctl', '--boot=0', '-o', 'short-precise'): 'journal-out\n',
('tar', 'czvf', output_tarfile, date_logdir): ''
}
@@ -105,9 +116,12 @@ class TestCollectLogs(FilesystemMockingTestCase):
subp(cmd) # Pass through tar cmd so we can check output
return expected_subp[cmd_tuple], ''
+ fake_stderr = mock.MagicMock()
+
wrap_and_call(
'cloudinit.cmd.devel.logs',
{'subp': {'side_effect': fake_subp},
+ 'sys.stderr': {'new': fake_stderr},
'CLOUDINIT_LOGS': {'new': [log1, log2]},
'CLOUDINIT_RUN_DIR': {'new': self.run_dir},
'USER_DATA_FILE': {'new': userdata}},
@@ -118,3 +132,4 @@ class TestCollectLogs(FilesystemMockingTestCase):
self.assertEqual(
'user-data',
load_file(os.path.join(out_logdir, 'user-data.txt')))
+ fake_stderr.write.assert_any_call('Wrote %s\n' % output_tarfile)
diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py
index 3f2dbb93..d6ba90f4 100644
--- a/cloudinit/cmd/main.py
+++ b/cloudinit/cmd/main.py
@@ -187,7 +187,7 @@ def attempt_cmdline_url(path, network=True, cmdline=None):
data = None
header = b'#cloud-config'
try:
- resp = util.read_file_or_url(**kwargs)
+ resp = url_helper.read_file_or_url(**kwargs)
if resp.ok():
data = resp.contents
if not resp.contents.startswith(header):
diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py
index dbe421c0..e2c54ae8 100644
--- a/cloudinit/cmd/tests/test_main.py
+++ b/cloudinit/cmd/tests/test_main.py
@@ -56,7 +56,7 @@ class TestMain(FilesystemMockingTestCase):
cmdargs = myargs(
debug=False, files=None, force=False, local=False, reporter=None,
subcommand='init')
- (item1, item2) = wrap_and_call(
+ (_item1, item2) = wrap_and_call(
'cloudinit.cmd.main',
{'util.close_stdin': True,
'netinfo.debug_info': 'my net debug info',
@@ -85,7 +85,7 @@ class TestMain(FilesystemMockingTestCase):
cmdargs = myargs(
debug=False, files=None, force=False, local=False, reporter=None,
subcommand='init')
- (item1, item2) = wrap_and_call(
+ (_item1, item2) = wrap_and_call(
'cloudinit.cmd.main',
{'util.close_stdin': True,
'netinfo.debug_info': 'my net debug info',
@@ -133,7 +133,7 @@ class TestMain(FilesystemMockingTestCase):
self.assertEqual(main.LOG, log)
self.assertIsNone(args)
- (item1, item2) = wrap_and_call(
+ (_item1, item2) = wrap_and_call(
'cloudinit.cmd.main',
{'util.close_stdin': True,
'netinfo.debug_info': 'my net debug info',
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 5b9cbca0..e18944ec 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for
All source entries in ``apt-sources`` that match regex in
``add_apt_repo_match`` will be added to the system using
``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults
-to ``^[\w-]+:\w``
+to ``^[\\w-]+:\\w``
**Add source list entries:**
@@ -378,7 +378,7 @@ def apply_debconf_selections(cfg, target=None):
# get a complete list of packages listed in input
pkgs_cfgd = set()
- for key, content in selsets.items():
+ for _key, content in selsets.items():
for line in content.splitlines():
if line.startswith("#"):
continue
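
For reference, the default ``add_apt_repo_match`` pattern matches short-form
source specs such as ``ppa:<user>/<archive>``; the doubled backslashes above
only fix the docstring escaping, the runtime regex is unchanged. A quick
illustration (the sample source strings are illustrative, not from this
commit):

    import re

    ADD_APT_REPO_MATCH = r'^[\w-]+:\w'
    print(bool(re.match(ADD_APT_REPO_MATCH, 'ppa:curtin-dev/test-archive')))  # True
    print(bool(re.match(ADD_APT_REPO_MATCH, 'deb http://archive.example.com xenial main')))  # False
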
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 233da1ef..db64f0a6 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -63,7 +63,6 @@ schema = {
'additionalProperties': False,
'minItems': 1,
'required': [],
- 'uniqueItems': True
}
}
}
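
Dropping ``'uniqueItems': True`` means repeated bootcmd entries (running the
same command twice is legitimate) no longer fail schema validation. An
illustration, assuming the jsonschema package; the schema below is a cut-down
stand-in, not the module's full schema:

    from jsonschema import ValidationError, validate

    cmds = [['echo', 'hi'], ['echo', 'hi']]
    validate(cmds, {'type': 'array'})  # passes: duplicates allowed
    try:
        validate(cmds, {'type': 'array', 'uniqueItems': True})
    except ValidationError as e:
        print(e.message)  # duplicates rejected under the old schema
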
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index c56319b5..885b3138 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -32,13 +32,23 @@ from cloudinit.settings import PER_ALWAYS
frequency = PER_ALWAYS
-REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
+REJECT_CMD_IF = ['route', 'add', '-host', '169.254.169.254', 'reject']
+REJECT_CMD_IP = ['ip', 'route', 'add', 'prohibit', '169.254.169.254']
def handle(name, cfg, _cloud, log, _args):
disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
if disabled:
- util.subp(REJECT_CMD, capture=False)
+ reject_cmd = None
+ if util.which('ip'):
+ reject_cmd = REJECT_CMD_IP
+ elif util.which('ifconfig'):
+ reject_cmd = REJECT_CMD_IF
+ else:
+ log.error(('Neither "route" nor "ip" command found, unable to '
+ 'manipulate routing table'))
+ return
+ util.subp(reject_cmd, capture=False)
else:
log.debug(("Skipping module named %s,"
" disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index c3e8c484..943089e0 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -680,13 +680,13 @@ def read_parttbl(device):
reliable way to probe the partition table.
"""
blkdev_cmd = [BLKDEV_CMD, '--rereadpt', device]
- udevadm_settle()
+ util.udevadm_settle()
try:
util.subp(blkdev_cmd)
except Exception as e:
util.logexc(LOG, "Failed reading the partition table %s" % e)
- udevadm_settle()
+ util.udevadm_settle()
def exec_mkpart_mbr(device, layout):
@@ -737,14 +737,10 @@ def exec_mkpart(table_type, device, layout):
return get_dyn_func("exec_mkpart_%s", table_type, device, layout)
-def udevadm_settle():
- util.subp(['udevadm', 'settle'])
-
-
def assert_and_settle_device(device):
"""Assert that device exists and settle so it is fully recognized."""
if not os.path.exists(device):
- udevadm_settle()
+ util.udevadm_settle()
if not os.path.exists(device):
raise RuntimeError("Device %s did not exist and was not created "
"with a udevamd settle." % device)
@@ -752,7 +748,7 @@ def assert_and_settle_device(device):
# Whether or not the device existed above, it is possible that udev
# events that would populate udev database (for reading by lsdname) have
# not yet finished. So settle again.
- udevadm_settle()
+ util.udevadm_settle()
def mkpart(device, definition):
diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index 69dc2d5e..eb9fbe66 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -43,7 +43,7 @@ def is_upstart_system():
del myenv['UPSTART_SESSION']
check_cmd = ['initctl', 'version']
try:
- (out, err) = util.subp(check_cmd, env=myenv)
+ (out, _err) = util.subp(check_cmd, env=myenv)
return 'upstart' in out
except util.ProcessExecutionError as e:
LOG.debug("'%s' returned '%s', not using upstart",
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 09374d2e..ac72ac4a 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,11 +47,16 @@ lxd-bridge will be configured accordingly.
domain: <domain>
"""
+from cloudinit import log as logging
from cloudinit import util
import os
distros = ['ubuntu']
+LOG = logging.getLogger(__name__)
+
+_DEFAULT_NETWORK_NAME = "lxdbr0"
+
def handle(name, cfg, cloud, log, args):
# Get config
@@ -109,6 +114,7 @@ def handle(name, cfg, cloud, log, args):
# Set up lxd-bridge if bridge config is given
dconf_comm = "debconf-communicate"
if bridge_cfg:
+ net_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
if os.path.exists("/etc/default/lxd-bridge") \
and util.which(dconf_comm):
# Bridge configured through packaging
@@ -135,15 +141,18 @@ def handle(name, cfg, cloud, log, args):
else:
# Built-in LXD bridge support
cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg)
+ maybe_cleanup_default(
+ net_name=net_name, did_init=bool(init_cfg),
+ create=bool(cmd_create), attach=bool(cmd_attach))
if cmd_create:
log.debug("Creating lxd bridge: %s" %
" ".join(cmd_create))
- util.subp(cmd_create)
+ _lxc(cmd_create)
if cmd_attach:
log.debug("Setting up default lxd bridge: %s" %
" ".join(cmd_create))
- util.subp(cmd_attach)
+ _lxc(cmd_attach)
elif bridge_cfg:
raise RuntimeError(
@@ -204,10 +213,10 @@ def bridge_to_cmd(bridge_cfg):
if bridge_cfg.get("mode") == "none":
return None, None
- bridge_name = bridge_cfg.get("name", "lxdbr0")
+ bridge_name = bridge_cfg.get("name", _DEFAULT_NETWORK_NAME)
cmd_create = []
- cmd_attach = ["lxc", "network", "attach-profile", bridge_name,
- "default", "eth0", "--force-local"]
+ cmd_attach = ["network", "attach-profile", bridge_name,
+ "default", "eth0"]
if bridge_cfg.get("mode") == "existing":
return None, cmd_attach
@@ -215,7 +224,7 @@ def bridge_to_cmd(bridge_cfg):
if bridge_cfg.get("mode") != "new":
raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode"))
- cmd_create = ["lxc", "network", "create", bridge_name]
+ cmd_create = ["network", "create", bridge_name]
if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"):
cmd_create.append("ipv4.address=%s/%s" %
@@ -247,8 +256,47 @@ def bridge_to_cmd(bridge_cfg):
if bridge_cfg.get("domain"):
cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain"))
- cmd_create.append("--force-local")
-
return cmd_create, cmd_attach
+
+def _lxc(cmd):
+ env = {'LC_ALL': 'C'}
+ util.subp(['lxc'] + list(cmd) + ["--force-local"], update_env=env)
+
+
+def maybe_cleanup_default(net_name, did_init, create, attach,
+ profile="default", nic_name="eth0"):
+ """Newer versions of lxc (3.0.1+) create a lxdbr0 network when
+ 'lxd init --auto' is run. Older versions did not.
+
+ By removing any network that lxd-init created, we simply leave the
+ add/attach code intact.
+
+ https://github.com/lxc/lxd/issues/4649"""
+ if net_name != _DEFAULT_NETWORK_NAME or not did_init:
+ return
+
+ fail_assume_enoent = " failed. Assuming it did not exist."
+ succeeded = " succeeded."
+ if create:
+ msg = "Deletion of lxd network '%s'" % net_name
+ try:
+ _lxc(["network", "delete", net_name])
+ LOG.debug(msg + succeeded)
+ except util.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ raise e
+ LOG.debug(msg + fail_assume_enoent)
+
+ if attach:
+ msg = "Removal of device '%s' from profile '%s'" % (nic_name, profile)
+ try:
+ _lxc(["profile", "device", "remove", profile, nic_name])
+ LOG.debug(msg + succeeded)
+ except util.ProcessExecutionError as e:
+ if e.exit_code != 1:
+ raise e
+ LOG.debug(msg + fail_assume_enoent)
+
+
# vi: ts=4 expandtab
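
Context for maybe_cleanup_default: on lxd 3.0.1+ ``lxd init --auto`` already
creates lxdbr0 and attaches it to the default profile, so the module deletes
what init created before running its own create/attach commands; an ``lxc``
exit status of 1 is tolerated as "did not exist". The typical call from
handle(), with the values used above:

    # Cleanup performed when both init and bridge configs are present:
    maybe_cleanup_default(net_name='lxdbr0', did_init=True,
                          create=True, attach=True)
    # ...which issues, via _lxc() (each suffixed with --force-local):
    #   lxc network delete lxdbr0
    #   lxc profile device remove default eth0
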
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index f14a4fc5..339baba9 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -76,6 +76,7 @@ DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
WS = re.compile("[%s]+" % (whitespace))
FSTAB_PATH = "/etc/fstab"
+MNT_COMMENT = "comment=cloudconfig"
LOG = logging.getLogger(__name__)
@@ -232,8 +233,8 @@ def setup_swapfile(fname, size=None, maxsize=None):
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
- except IOError as e:
- LOG.debug("Not creating swap. failed to read meminfo")
+ except IOError:
+ LOG.debug("Not creating swap: failed to read meminfo")
return
util.ensure_dir(tdir)
@@ -280,17 +281,17 @@ def handle_swapcfg(swapcfg):
if os.path.exists(fname):
if not os.path.exists("/proc/swaps"):
- LOG.debug("swap file %s existed. no /proc/swaps. Being safe.",
- fname)
+ LOG.debug("swap file %s exists, but no /proc/swaps exists, "
+ "being safe", fname)
return fname
try:
for line in util.load_file("/proc/swaps").splitlines():
if line.startswith(fname + " "):
- LOG.debug("swap file %s already in use.", fname)
+ LOG.debug("swap file %s already in use", fname)
return fname
- LOG.debug("swap file %s existed, but not in /proc/swaps", fname)
+ LOG.debug("swap file %s exists, but not in /proc/swaps", fname)
except Exception:
- LOG.warning("swap file %s existed. Error reading /proc/swaps",
+ LOG.warning("swap file %s exists. Error reading /proc/swaps",
fname)
return fname
@@ -327,6 +328,22 @@ def handle(_name, cfg, cloud, log, _args):
LOG.debug("mounts configuration is %s", cfgmnt)
+ fstab_lines = []
+ fstab_devs = {}
+ fstab_removed = []
+
+ for line in util.load_file(FSTAB_PATH).splitlines():
+ if MNT_COMMENT in line:
+ fstab_removed.append(line)
+ continue
+
+ try:
+ toks = WS.split(line)
+ except Exception:
+ fstab_lines.append(line)
+ continue
+ fstab_devs[toks[0]] = line
+ fstab_lines.append(line)
+
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
@@ -336,12 +353,17 @@ def handle(_name, cfg, cloud, log, _args):
start = str(cfgmnt[i][0])
sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
+ if sanitized != start:
+ log.debug("changed %s => %s" % (start, sanitized))
+
if sanitized is None:
- log.debug("Ignorming nonexistant named mount %s", start)
+ log.debug("Ignoring nonexistent named mount %s", start)
+ continue
+ elif sanitized in fstab_devs:
+ log.info("Device %s already defined in fstab: %s",
+ sanitized, fstab_devs[sanitized])
continue
- if sanitized != start:
- log.debug("changed %s => %s" % (start, sanitized))
cfgmnt[i][0] = sanitized
# in case the user did not quote a field (likely fs-freq, fs_passno)
@@ -373,11 +395,17 @@ def handle(_name, cfg, cloud, log, _args):
for defmnt in defmnts:
start = defmnt[0]
sanitized = sanitize_devname(start, cloud.device_name_to_device, log)
- if sanitized is None:
- log.debug("Ignoring nonexistant default named mount %s", start)
- continue
if sanitized != start:
log.debug("changed default device %s => %s" % (start, sanitized))
+
+ if sanitized is None:
+ log.debug("Ignoring nonexistent default named mount %s", start)
+ continue
+ elif sanitized in fstab_devs:
+ log.debug("Device %s already defined in fstab: %s",
+ sanitized, fstab_devs[sanitized])
+ continue
+
defmnt[0] = sanitized
cfgmnt_has = False
@@ -397,7 +425,7 @@ def handle(_name, cfg, cloud, log, _args):
actlist = []
for x in cfgmnt:
if x[1] is None:
- log.debug("Skipping non-existent device named %s", x[0])
+ log.debug("Skipping nonexistent device named %s", x[0])
else:
actlist.append(x)
@@ -406,34 +434,21 @@ def handle(_name, cfg, cloud, log, _args):
actlist.append([swapret, "none", "swap", "sw", "0", "0"])
if len(actlist) == 0:
- log.debug("No modifications to fstab needed.")
+ log.debug("No modifications to fstab needed")
return
- comment = "comment=cloudconfig"
cc_lines = []
needswap = False
dirs = []
for line in actlist:
# write 'comment' in the fs_mntops, entry, claiming this
- line[3] = "%s,%s" % (line[3], comment)
+ line[3] = "%s,%s" % (line[3], MNT_COMMENT)
if line[2] == "swap":
needswap = True
if line[1].startswith("/"):
dirs.append(line[1])
cc_lines.append('\t'.join(line))
- fstab_lines = []
- removed = []
- for line in util.load_file(FSTAB_PATH).splitlines():
- try:
- toks = WS.split(line)
- if toks[3].find(comment) != -1:
- removed.append(line)
- continue
- except Exception:
- pass
- fstab_lines.append(line)
-
for d in dirs:
try:
util.ensure_dir(d)
@@ -441,7 +456,7 @@ def handle(_name, cfg, cloud, log, _args):
util.logexc(log, "Failed to make '%s' config-mount", d)
sadds = [WS.sub(" ", n) for n in cc_lines]
- sdrops = [WS.sub(" ", n) for n in removed]
+ sdrops = [WS.sub(" ", n) for n in fstab_removed]
sops = (["- " + drop for drop in sdrops if drop not in sadds] +
["+ " + add for add in sadds if add not in sdrops])
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index cbd0237d..9e074bda 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -10,20 +10,95 @@ from cloudinit.config.schema import (
get_schema_doc, validate_cloudconfig_schema)
from cloudinit import log as logging
from cloudinit.settings import PER_INSTANCE
+from cloudinit import temp_utils
from cloudinit import templater
from cloudinit import type_utils
from cloudinit import util
+import copy
import os
+import six
from textwrap import dedent
LOG = logging.getLogger(__name__)
frequency = PER_INSTANCE
NTP_CONF = '/etc/ntp.conf'
-TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
NR_POOL_SERVERS = 4
-distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu']
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu']
+
+NTP_CLIENT_CONFIG = {
+ 'chrony': {
+ 'check_exe': 'chronyd',
+ 'confpath': '/etc/chrony.conf',
+ 'packages': ['chrony'],
+ 'service_name': 'chrony',
+ 'template_name': 'chrony.conf.{distro}',
+ 'template': None,
+ },
+ 'ntp': {
+ 'check_exe': 'ntpd',
+ 'confpath': NTP_CONF,
+ 'packages': ['ntp'],
+ 'service_name': 'ntp',
+ 'template_name': 'ntp.conf.{distro}',
+ 'template': None,
+ },
+ 'ntpdate': {
+ 'check_exe': 'ntpdate',
+ 'confpath': NTP_CONF,
+ 'packages': ['ntpdate'],
+ 'service_name': 'ntpdate',
+ 'template_name': 'ntp.conf.{distro}',
+ 'template': None,
+ },
+ 'systemd-timesyncd': {
+ 'check_exe': '/lib/systemd/systemd-timesyncd',
+ 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf',
+ 'packages': [],
+ 'service_name': 'systemd-timesyncd',
+ 'template_name': 'timesyncd.conf',
+ 'template': None,
+ },
+}
+
+# This is Distro-specific configuration overrides of the base config
+DISTRO_CLIENT_CONFIG = {
+ 'debian': {
+ 'chrony': {
+ 'confpath': '/etc/chrony/chrony.conf',
+ },
+ },
+ 'opensuse': {
+ 'chrony': {
+ 'service_name': 'chronyd',
+ },
+ 'ntp': {
+ 'confpath': '/etc/ntp.conf',
+ 'service_name': 'ntpd',
+ },
+ 'systemd-timesyncd': {
+ 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ },
+ },
+ 'sles': {
+ 'chrony': {
+ 'service_name': 'chronyd',
+ },
+ 'ntp': {
+ 'confpath': '/etc/ntp.conf',
+ 'service_name': 'ntpd',
+ },
+ 'systemd-timesyncd': {
+ 'check_exe': '/usr/lib/systemd/systemd-timesyncd',
+ },
+ },
+ 'ubuntu': {
+ 'chrony': {
+ 'confpath': '/etc/chrony/chrony.conf',
+ },
+ },
+}
# The schema definition for each cloud-config module is a strict contract for
@@ -48,7 +123,34 @@ schema = {
'distros': distros,
'examples': [
dedent("""\
+ # Override ntp with chrony configuration on Ubuntu
+ ntp:
+ enabled: true
+ ntp_client: chrony # Uses cloud-init default chrony configuration
+ """),
+ dedent("""\
+ # Provide a custom ntp client configuration
ntp:
+ enabled: true
+ ntp_client: myntpclient
+ config:
+ confpath: /etc/myntpclient/myntpclient.conf
+ check_exe: myntpclientd
+ packages:
+ - myntpclient
+ service_name: myntpclient
+ template: |
+ ## template:jinja
+ # My NTP Client config
+ {% if pools -%}# pools{% endif %}
+ {% for pool in pools -%}
+ pool {{pool}} iburst
+ {% endfor %}
+ {%- if servers %}# servers
+ {% endif %}
+ {% for server in servers -%}
+ server {{server}} iburst
+ {% endfor %}
pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org]
servers:
- ntp.server.local
@@ -83,79 +185,159 @@ schema = {
List of ntp servers. If both pools and servers are
empty, 4 default pool servers will be provided with
the format ``{0-3}.{distro}.pool.ntp.org``.""")
- }
+ },
+ 'ntp_client': {
+ 'type': 'string',
+ 'default': 'auto',
+ 'description': dedent("""\
+ Name of an NTP client to use to configure system NTP.
+ When unprovided or 'auto' the default client preferred
+ by the distribution will be used. The following
+ built-in client names can be used to override existing
+ configuration defaults: chrony, ntp, ntpdate,
+ systemd-timesyncd."""),
+ },
+ 'enabled': {
+ 'type': 'boolean',
+ 'default': True,
+ 'description': dedent("""\
+ Attempt to enable ntp clients if set to True. If set
+ to False, ntp client will not be configured or
+ installed"""),
+ },
+ 'config': {
+ 'description': dedent("""\
+ Configuration settings or overrides for the
+ ``ntp_client`` specified."""),
+ 'type': ['object'],
+ 'properties': {
+ 'confpath': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The path to where the ``ntp_client``
+ configuration is written."""),
+ },
+ 'check_exe': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The executable name for the ``ntp_client``.
+ For example, ntp service ``check_exe`` is
+ 'ntpd' because it runs the ntpd binary."""),
+ },
+ 'packages': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'string',
+ },
+ 'uniqueItems': True,
+ 'description': dedent("""\
+ List of packages needed to be installed for the
+ selected ``ntp_client``."""),
+ },
+ 'service_name': {
+ 'type': 'string',
+ 'description': dedent("""\
+ The systemd or sysvinit service name used to
+ start and stop the ``ntp_client``
+ service."""),
+ },
+ 'template': {
+ 'type': 'string',
+ 'description': dedent("""\
+ Inline template allowing users to define their
+ own ``ntp_client`` configuration template.
+ The value must start with '## template:jinja'
+ to enable use of templating support.
+ """),
+ },
+ },
+ # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override
+ # of builtin client values.
+ 'required': [],
+ 'minProperties': 1, # If we have config, define something
+ 'additionalProperties': False
+ },
},
'required': [],
'additionalProperties': False
}
}
}
-
-__doc__ = get_schema_doc(schema) # Supplement python help()
+REQUIRED_NTP_CONFIG_KEYS = frozenset([
+ 'check_exe', 'confpath', 'packages', 'service_name'])
-def handle(name, cfg, cloud, log, _args):
- """Enable and configure ntp."""
- if 'ntp' not in cfg:
- LOG.debug(
- "Skipping module named %s, not present or disabled by cfg", name)
- return
- ntp_cfg = cfg['ntp']
- if ntp_cfg is None:
- ntp_cfg = {} # Allow empty config which will install the package
+__doc__ = get_schema_doc(schema) # Supplement python help()
- # TODO drop this when validate_cloudconfig_schema is strict=True
- if not isinstance(ntp_cfg, (dict)):
- raise RuntimeError(
- "'ntp' key existed in config, but not a dictionary type,"
- " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
- validate_cloudconfig_schema(cfg, schema)
- if ntp_installable():
- service_name = 'ntp'
- confpath = NTP_CONF
- template_name = None
- packages = ['ntp']
- check_exe = 'ntpd'
- else:
- service_name = 'systemd-timesyncd'
- confpath = TIMESYNCD_CONF
- template_name = 'timesyncd.conf'
- packages = []
- check_exe = '/lib/systemd/systemd-timesyncd'
-
- rename_ntp_conf()
- # ensure when ntp is installed it has a configuration file
- # to use instead of starting up with packaged defaults
- write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name)
- install_ntp(cloud.distro.install_packages, packages=packages,
- check_exe=check_exe)
+def distro_ntp_client_configs(distro):
+ """Construct a distro-specific ntp client config dictionary by merging
+ distro specific changes into base config.
- try:
- reload_ntp(service_name, systemd=cloud.distro.uses_systemd())
- except util.ProcessExecutionError as e:
- LOG.exception("Failed to reload/start ntp service: %s", e)
- raise
+ @param distro: String providing the distro class name.
+ @returns: Dict of distro configurations for ntp clients.
+ """
+ dcfg = DISTRO_CLIENT_CONFIG
+ cfg = copy.copy(NTP_CLIENT_CONFIG)
+ if distro in dcfg:
+ cfg = util.mergemanydict([cfg, dcfg[distro]], reverse=True)
+ return cfg
-def ntp_installable():
- """Check if we can install ntp package
+def select_ntp_client(ntp_client, distro):
+ """Determine which ntp client is to be used, consulting the distro
+ for its preference.
- Ubuntu-Core systems do not have an ntp package available, so
- we always return False. Other systems require package managers to install
- the ntp package If we fail to find one of the package managers, then we
- cannot install ntp.
+ @param ntp_client: String name of the ntp client to use.
+ @param distro: Distro class instance.
+ @returns: Dict of the selected ntp client or {} if none selected.
"""
- if util.system_is_snappy():
- return False
- if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])):
- return True
+ # construct distro-specific ntp_client_config dict
+ distro_cfg = distro_ntp_client_configs(distro.name)
+
+ # user specified client, return its config
+ if ntp_client and ntp_client != 'auto':
+ LOG.debug('Selected NTP client "%s" via user-data configuration',
+ ntp_client)
+ return distro_cfg.get(ntp_client, {})
+
+ # default to auto if unset in distro
+ distro_ntp_client = distro.get_option('ntp_client', 'auto')
+
+ clientcfg = {}
+ if distro_ntp_client == "auto":
+ for client in distro.preferred_ntp_clients:
+ cfg = distro_cfg.get(client)
+ if util.which(cfg.get('check_exe')):
+ LOG.debug('Selected NTP client "%s", already installed',
+ client)
+ clientcfg = cfg
+ break
+
+ if not clientcfg:
+ client = distro.preferred_ntp_clients[0]
+ LOG.debug(
+ 'Selected distro preferred NTP client "%s", not yet installed',
+ client)
+ clientcfg = distro_cfg.get(client)
+ else:
+ LOG.debug('Selected NTP client "%s" via distro system config',
+ distro_ntp_client)
+ clientcfg = distro_cfg.get(distro_ntp_client, {})
+
+ return clientcfg
- return False
+def install_ntp_client(install_func, packages=None, check_exe="ntpd"):
+ """Install ntp client package if not already installed.
-def install_ntp(install_func, packages=None, check_exe="ntpd"):
+ @param install_func: function. This parameter is invoked with the contents
+ of the packages parameter.
+ @param packages: list. This parameter defaults to ['ntp'].
+ @param check_exe: string. The name of a binary that indicates whether
+ the specified package is already installed.
+ """
if util.which(check_exe):
return
if packages is None:
@@ -164,15 +346,23 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"):
install_func(packages)
-def rename_ntp_conf(config=None):
- """Rename any existing ntp.conf file"""
- if config is None: # For testing
- config = NTP_CONF
- if os.path.exists(config):
- util.rename(config, config + ".dist")
+def rename_ntp_conf(confpath=None):
+ """Rename any existing ntp client config file
+
+ @param confpath: string. Specify a path to an existing ntp client
+ configuration file.
+ """
+ if os.path.exists(confpath):
+ util.rename(confpath, confpath + ".dist")
def generate_server_names(distro):
+ """Generate a list of server names to populate an ntp client configuration
+ file.
+
+ @param distro: string. Specify the distro name
+ @returns: list: A list of strings representing ntp servers for this distro.
+ """
names = []
pool_distro = distro
# For legal reasons x.pool.sles.ntp.org does not exist,
@@ -185,34 +375,60 @@ def generate_server_names(distro):
return names
-def write_ntp_config_template(cfg, cloud, path, template=None):
- servers = cfg.get('servers', [])
- pools = cfg.get('pools', [])
+def write_ntp_config_template(distro_name, servers=None, pools=None,
+ path=None, template_fn=None, template=None):
+ """Render a ntp client configuration for the specified client.
+
+ @param distro_name: string. The distro class name.
+ @param servers: A list of strings specifying ntp servers. Defaults to empty
+ list.
+ @param pools: A list of strings specifying ntp pools. Defaults to empty
+ list.
+ @param path: A string to specify where to write the rendered template.
+ @param template_fn: A string to specify the template source file.
+ @param template: A string specifying the contents of the template. This
+ content will be written to a temporary file before being used to render
+ the configuration file.
+
+ @raises: ValueError when path is None.
+ @raises: ValueError when template_fn is None and template is None.
+ """
+ if not servers:
+ servers = []
+ if not pools:
+ pools = []
if len(servers) == 0 and len(pools) == 0:
- pools = generate_server_names(cloud.distro.name)
+ pools = generate_server_names(distro_name)
LOG.debug(
'Adding distro default ntp pool servers: %s', ','.join(pools))
- params = {
- 'servers': servers,
- 'pools': pools,
- }
+ if not path:
+ raise ValueError('Invalid value for path parameter')
- if template is None:
- template = 'ntp.conf.%s' % cloud.distro.name
+ if not template_fn and not template:
+ raise ValueError('Neither template_fn nor template provided')
- template_fn = cloud.get_template_filename(template)
- if not template_fn:
- template_fn = cloud.get_template_filename('ntp.conf')
- if not template_fn:
- raise RuntimeError(
- 'No template found, not rendering {path}'.format(path=path))
+ params = {'servers': servers, 'pools': pools}
+ if template:
+ tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl")
+ template_fn = tfile[1] # filepath is second item in tuple
+ util.write_file(template_fn, content=template)
templater.render_to_file(template_fn, path, params)
+ # clean up temporary template
+ if template:
+ util.del_file(template_fn)
def reload_ntp(service, systemd=False):
+ """Restart or reload an ntp system service.
+
+ @param service: A string specifying the name of the service to be affected.
+ @param systemd: A boolean indicating if the distro uses systemd, defaults
+ to False.
+ @returns: A tuple of stdout, stderr results from executing the action.
+ """
if systemd:
cmd = ['systemctl', 'reload-or-restart', service]
else:
@@ -220,4 +436,117 @@ def reload_ntp(service, systemd=False):
util.subp(cmd, capture=True)
+def supplemental_schema_validation(ntp_config):
+ """Validate user-provided ntp:config option values.
+
+ This function supplements flexible jsonschema validation with specific
+ value checks to aid in triage of invalid user-provided configuration.
+
+ @param ntp_config: Dictionary of configuration value under 'ntp'.
+
+ @raises: ValueError describing invalid values provided.
+ """
+ errors = []
+ missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys()))
+ if missing:
+ keys = ', '.join(sorted(missing))
+ errors.append(
+ 'Missing required ntp:config keys: {keys}'.format(keys=keys))
+ elif not any([ntp_config.get('template'),
+ ntp_config.get('template_name')]):
+ errors.append(
+ 'Either ntp:config:template or ntp:config:template_name values'
+ ' are required')
+ for key, value in sorted(ntp_config.items()):
+ keypath = 'ntp:config:' + key
+ if key == 'confpath':
+ if not all([value, isinstance(value, six.string_types)]):
+ errors.append(
+ 'Expected a config file path {keypath}.'
+ ' Found ({value})'.format(keypath=keypath, value=value))
+ elif key == 'packages':
+ if not isinstance(value, list):
+ errors.append(
+ 'Expected a list of required package names for {keypath}.'
+ ' Found ({value})'.format(keypath=keypath, value=value))
+ elif key in ('template', 'template_name'):
+ if value is None: # Either template or template_name can be none
+ continue
+ if not isinstance(value, six.string_types):
+ errors.append(
+ 'Expected a string type for {keypath}.'
+ ' Found ({value})'.format(keypath=keypath, value=value))
+ elif not isinstance(value, six.string_types):
+ errors.append(
+ 'Expected a string type for {keypath}.'
+ ' Found ({value})'.format(keypath=keypath, value=value))
+
+ if errors:
+ raise ValueError('Invalid ntp configuration:\n{errors}'.format(
+ errors='\n'.join(errors)))
+
+
+def handle(name, cfg, cloud, log, _args):
+ """Enable and configure ntp."""
+ if 'ntp' not in cfg:
+ LOG.debug(
+ "Skipping module named %s, not present or disabled by cfg", name)
+ return
+ ntp_cfg = cfg['ntp']
+ if ntp_cfg is None:
+ ntp_cfg = {} # Allow empty config which will install the package
+
+ # TODO drop this when validate_cloudconfig_schema is strict=True
+ if not isinstance(ntp_cfg, (dict)):
+ raise RuntimeError(
+ "'ntp' key existed in config, but not a dictionary type,"
+ " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg)))
+
+ validate_cloudconfig_schema(cfg, schema)
+
+ # Allow users to explicitly enable/disable
+ enabled = ntp_cfg.get('enabled', True)
+ if util.is_false(enabled):
+ LOG.debug("Skipping module named %s, disabled by cfg", name)
+ return
+
+ # Select which client is going to be used and get the configuration
+ ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'),
+ cloud.distro)
+
+ # Allow user ntp config to override distro configurations
+ ntp_client_config = util.mergemanydict(
+ [ntp_client_config, ntp_cfg.get('config', {})], reverse=True)
+
+ supplemental_schema_validation(ntp_client_config)
+ rename_ntp_conf(confpath=ntp_client_config.get('confpath'))
+
+ template_fn = None
+ if not ntp_client_config.get('template'):
+ template_name = (
+ ntp_client_config.get('template_name').replace('{distro}',
+ cloud.distro.name))
+ template_fn = cloud.get_template_filename(template_name)
+ if not template_fn:
+ msg = ('No template found, not rendering %s' %
+ ntp_client_config.get('template_name'))
+ raise RuntimeError(msg)
+
+ write_ntp_config_template(cloud.distro.name,
+ servers=ntp_cfg.get('servers', []),
+ pools=ntp_cfg.get('pools', []),
+ path=ntp_client_config.get('confpath'),
+ template_fn=template_fn,
+ template=ntp_client_config.get('template'))
+
+ install_ntp_client(cloud.distro.install_packages,
+ packages=ntp_client_config['packages'],
+ check_exe=ntp_client_config['check_exe'])
+ try:
+ reload_ntp(ntp_client_config['service_name'],
+ systemd=cloud.distro.uses_systemd())
+ except util.ProcessExecutionError as e:
+ LOG.exception("Failed to reload/start ntp service: %s", e)
+ raise
+
# vi: ts=4 expandtab
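
The rewritten module reduces client handling to a three-step lookup: overlay
``DISTRO_CLIENT_CONFIG[distro]`` onto ``NTP_CLIENT_CONFIG``, honor an explicit
``ntp_client`` if given, otherwise walk ``distro.preferred_ntp_clients`` and
take the first whose ``check_exe`` is already installed (falling back to the
distro's first preference). User ``ntp:config`` values are then merged over
the winner before ``supplemental_schema_validation`` checks the required keys.
A cut-down sketch of the overlay step (a plain nested update stands in for
``util.mergemanydict``, and the dicts are abbreviated from the diff):

    import copy

    NTP_CLIENT_CONFIG = {'chrony': {'confpath': '/etc/chrony.conf',
                                    'service_name': 'chrony'}}
    DISTRO_CLIENT_CONFIG = {'ubuntu': {'chrony': {
        'confpath': '/etc/chrony/chrony.conf'}}}

    def distro_ntp_client_configs(distro):
        cfg = copy.deepcopy(NTP_CLIENT_CONFIG)
        for client, overrides in DISTRO_CLIENT_CONFIG.get(distro, {}).items():
            cfg.setdefault(client, {}).update(overrides)
        return cfg

    print(distro_ntp_client_configs('ubuntu')['chrony']['confpath'])
    # /etc/chrony/chrony.conf
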
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 878069b7..3be0d1c1 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -41,6 +41,7 @@ keys to post. Available keys are:
"""
from cloudinit import templater
+from cloudinit import url_helper
from cloudinit import util
from cloudinit.settings import PER_INSTANCE
@@ -136,9 +137,9 @@ def handle(name, cfg, cloud, log, args):
}
url = templater.render_string(url, url_params)
try:
- util.read_file_or_url(url, data=real_submit_keys,
- retries=tries, sec_between=3,
- ssl_details=util.fetch_ssl_details(cloud.paths))
+ url_helper.read_file_or_url(
+ url, data=real_submit_keys, retries=tries, sec_between=3,
+ ssl_details=util.fetch_ssl_details(cloud.paths))
except Exception:
util.logexc(log, "Failed to post phone home data to %s in %s tries",
url, tries)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 4da3a588..50b37470 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -74,7 +74,7 @@ def givecmdline(pid):
if util.is_FreeBSD():
(output, _err) = util.subp(['procstat', '-c', str(pid)])
line = output.splitlines()[1]
- m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line)
+ m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line)
return m.group(2)
else:
return util.load_file("/proc/%s/cmdline" % pid)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index 013e69b5..2edddd0c 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -81,7 +81,7 @@ def _resize_xfs(mount_point, devpth):
def _resize_ufs(mount_point, devpth):
- return ('growfs', devpth)
+ return ('growfs', '-y', devpth)
def _resize_zfs(mount_point, devpth):
@@ -89,13 +89,11 @@ def _resize_zfs(mount_point, devpth):
def _get_dumpfs_output(mount_point):
- dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point])
- return dumpfs_res
+ return util.subp(['dumpfs', '-m', mount_point])[0]
def _get_gpart_output(part):
- gpart_res, err = util.subp(['gpart', 'show', part])
- return gpart_res
+ return util.subp(['gpart', 'show', part])[0]
def _can_skip_resize_ufs(mount_point, devpth):
@@ -113,7 +111,7 @@ def _can_skip_resize_ufs(mount_point, devpth):
if not line.startswith('#'):
newfs_cmd = shlex.split(line)
opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:'
- optlist, args = getopt.getopt(newfs_cmd[1:], opt_value)
+ optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value)
for o, a in optlist:
if o == "-s":
cur_fs_sz = int(a)
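
On FreeBSD, growfs(8) prompts for confirmation before resizing; the added
``-y`` answers yes, so the grow also succeeds on an unattended first boot.
Sketch of the command tuple now handed to util.subp (device path
illustrative):

    cmd = ('growfs', '-y', '/dev/gpt/rootfs')
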
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 530808ce..1c679430 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -209,8 +209,7 @@ class SubscriptionManager(object):
cmd.append("--serverurl={0}".format(self.server_hostname))
try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
+ return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
@@ -233,8 +232,7 @@ class SubscriptionManager(object):
# Attempting to register the system only
try:
- return_out, return_err = self._sub_man_cli(cmd,
- logstring_val=True)
+ return_out = self._sub_man_cli(cmd, logstring_val=True)[0]
except util.ProcessExecutionError as e:
if e.stdout == "":
self.log_warn("Registration failed due "
@@ -257,7 +255,7 @@ class SubscriptionManager(object):
.format(self.servicelevel)]
try:
- return_out, return_err = self._sub_man_cli(cmd)
+ return_out = self._sub_man_cli(cmd)[0]
except util.ProcessExecutionError as e:
if e.stdout.rstrip() != '':
for line in e.stdout.split("\n"):
@@ -275,7 +273,7 @@ class SubscriptionManager(object):
def _set_auto_attach(self):
cmd = ['attach', '--auto']
try:
- return_out, return_err = self._sub_man_cli(cmd)
+ return_out = self._sub_man_cli(cmd)[0]
except util.ProcessExecutionError as e:
self.log_warn("Auto-attach failed with: {0}".format(e))
return False
@@ -294,12 +292,12 @@ class SubscriptionManager(object):
# Get all available pools
cmd = ['list', '--available', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
+ results = self._sub_man_cli(cmd)[0]
available = (results.rstrip()).split("\n")
# Get all consumed pools
cmd = ['list', '--consumed', '--pool-only']
- results, errors = self._sub_man_cli(cmd)
+ results = self._sub_man_cli(cmd)[0]
consumed = (results.rstrip()).split("\n")
return available, consumed
@@ -311,14 +309,14 @@ class SubscriptionManager(object):
'''
cmd = ['repos', '--list-enabled']
- return_out, return_err = self._sub_man_cli(cmd)
+ return_out = self._sub_man_cli(cmd)[0]
active_repos = []
for repo in return_out.split("\n"):
if "Repo ID:" in repo:
active_repos.append((repo.split(':')[1]).strip())
cmd = ['repos', '--list-disabled']
- return_out, return_err = self._sub_man_cli(cmd)
+ return_out = self._sub_man_cli(cmd)[0]
inactive_repos = []
for repo in return_out.split("\n"):
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index af08788c..27d2366c 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -203,8 +203,8 @@ LOG = logging.getLogger(__name__)
COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
HOST_PORT_RE = re.compile(
r'^(?P<proto>[@]{0,2})'
- '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
- '([:](?P<port>[0-9]+))?$')
+ r'(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
+ r'([:](?P<port>[0-9]+))?$')
def reload_syslog(command=DEF_RELOAD, systemd=False):
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 539cbd5d..b6f6c807 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -66,7 +66,6 @@ schema = {
'additionalProperties': False,
'minItems': 1,
'required': [],
- 'uniqueItems': True
}
}
}
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index bb24d57f..5ef97376 100755
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -68,16 +68,57 @@ import re
import sys
from cloudinit.distros import ug_util
-from cloudinit import ssh_util
+from cloudinit import log as logging
+from cloudinit.ssh_util import update_ssh_config
from cloudinit import util
from string import ascii_letters, digits
+LOG = logging.getLogger(__name__)
+
# We are removing certain 'painful' letters/numbers
PW_SET = (''.join([x for x in ascii_letters + digits
if x not in 'loLOI01']))
+def handle_ssh_pwauth(pw_auth, service_cmd=None, service_name="ssh"):
+ """Apply sshd PasswordAuthentication changes.
+
+ @param pw_auth: config setting from 'pw_auth'.
+ Best given as True, False, or "unchanged".
+ @param service_cmd: The service command list (['service'])
+ @param service_name: The name of the sshd service for the system.
+
+ @return: None"""
+ cfg_name = "PasswordAuthentication"
+ if service_cmd is None:
+ service_cmd = ["service"]
+
+ if util.is_true(pw_auth):
+ cfg_val = 'yes'
+ elif util.is_false(pw_auth):
+ cfg_val = 'no'
+ else:
+ bmsg = "Leaving ssh config '%s' unchanged." % cfg_name
+ if pw_auth is None or pw_auth.lower() == 'unchanged':
+ LOG.debug("%s ssh_pwauth=%s", bmsg, pw_auth)
+ else:
+ LOG.warning("%s Unrecognized value: ssh_pwauth=%s", bmsg, pw_auth)
+ return
+
+ updated = update_ssh_config({cfg_name: cfg_val})
+ if not updated:
+ LOG.debug("No need to restart ssh service, %s not updated.", cfg_name)
+ return
+
+ if 'systemctl' in service_cmd:
+ cmd = list(service_cmd) + ["restart", service_name]
+ else:
+ cmd = list(service_cmd) + [service_name, "restart"]
+ util.subp(cmd)
+ LOG.debug("Restarted the ssh daemon.")
+
+
def handle(_name, cfg, cloud, log, args):
if len(args) != 0:
# if run from command line, and give args, wipe the chpasswd['list']
@@ -170,65 +211,9 @@ def handle(_name, cfg, cloud, log, args):
if expired_users:
log.debug("Expired passwords for: %s users", expired_users)
- change_pwauth = False
- pw_auth = None
- if 'ssh_pwauth' in cfg:
- if util.is_true(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'yes'
- elif util.is_false(cfg['ssh_pwauth']):
- change_pwauth = True
- pw_auth = 'no'
- elif str(cfg['ssh_pwauth']).lower() == 'unchanged':
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not str(cfg['ssh_pwauth']).strip():
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- elif not cfg['ssh_pwauth']:
- log.debug('Leaving auth line unchanged')
- change_pwauth = False
- else:
- msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth']
- util.logexc(log, msg)
-
- if change_pwauth:
- replaced_auth = False
-
- # See: man sshd_config
- old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG)
- new_lines = []
- i = 0
- for (i, line) in enumerate(old_lines):
- # Keywords are case-insensitive and arguments are case-sensitive
- if line.key == 'passwordauthentication':
- log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
- replaced_auth = True
- line.value = pw_auth
- new_lines.append(line)
-
- if not replaced_auth:
- log.debug("Adding new auth line %s", i + 1)
- replaced_auth = True
- new_lines.append(ssh_util.SshdConfigLine('',
- 'PasswordAuthentication',
- pw_auth))
-
- lines = [str(l) for l in new_lines]
- util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines),
- copy_mode=True)
-
- try:
- cmd = cloud.distro.init_cmd # Default service
- cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
- cmd.append('restart')
- if 'systemctl' in cmd: # Switch action ordering
- cmd[1], cmd[2] = cmd[2], cmd[1]
- cmd = filter(None, cmd) # Remove empty arguments
- util.subp(cmd)
- log.debug("Restarted the ssh daemon")
- except Exception:
- util.logexc(log, "Restarting of the ssh daemon failed")
+ handle_ssh_pwauth(
+ cfg.get('ssh_pwauth'), service_cmd=cloud.distro.init_cmd,
+ service_name=cloud.distro.get_option('ssh_svcname', 'ssh'))
if len(errors):
log.debug("%s errors occured, re-raising the last one", len(errors))
diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py
index 34a53fd4..90724b81 100644
--- a/cloudinit/config/cc_snap.py
+++ b/cloudinit/config/cc_snap.py
@@ -110,7 +110,6 @@ schema = {
'additionalItems': False, # Reject non-string & non-list
'minItems': 1,
'minProperties': 1,
- 'uniqueItems': True
},
'squashfuse_in_container': {
'type': 'boolean'
@@ -204,12 +203,12 @@ def maybe_install_squashfuse(cloud):
return
try:
cloud.distro.update_package_sources()
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Package update failed")
raise
try:
cloud.distro.install_packages(['squashfuse'])
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to install squashfuse")
raise
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index bab80bbe..15bee2d3 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -213,7 +213,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
def read_installed_packages():
ret = []
- for (name, date, version, dev) in read_pkg_data():
+ for (name, _date, _version, dev) in read_pkg_data():
if dev:
ret.append(NAMESPACE_DELIM.join([name, dev]))
else:
@@ -222,7 +222,7 @@ def read_installed_packages():
def read_pkg_data():
- out, err = util.subp([SNAPPY_CMD, "list"])
+ out, _err = util.subp([SNAPPY_CMD, "list"])
pkg_data = []
for line in out.splitlines()[1:]:
toks = line.split(sep=None, maxsplit=3)
diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py
index 16b1868b..5e082bd6 100644
--- a/cloudinit/config/cc_ubuntu_advantage.py
+++ b/cloudinit/config/cc_ubuntu_advantage.py
@@ -87,7 +87,6 @@ schema = {
'additionalItems': False, # Reject non-string & non-list
'minItems': 1,
'minProperties': 1,
- 'uniqueItems': True
}
},
'additionalProperties': False, # Reject keys not in schema
@@ -149,12 +148,12 @@ def maybe_install_ua_tools(cloud):
return
try:
cloud.distro.update_package_sources()
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Package update failed")
raise
try:
cloud.distro.install_packages(['ubuntu-advantage-tools'])
- except Exception as e:
+ except Exception:
util.logexc(LOG, "Failed to install ubuntu-advantage-tools")
raise
diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py
index b215e95a..c95bdaad 100644
--- a/cloudinit/config/cc_users_groups.py
+++ b/cloudinit/config/cc_users_groups.py
@@ -54,8 +54,9 @@ config keys for an entry in ``users`` are as follows:
- ``ssh_authorized_keys``: Optional. List of ssh keys to add to user's
authkeys file. Default: none
- ``ssh_import_id``: Optional. SSH id to import for user. Default: none
- - ``sudo``: Optional. Sudo rule to use, or list of sudo rules to use.
- Default: none.
+    - ``sudo``: Optional. Sudo rule to use, a list of sudo rules to use, or False.
+      Default: none. Omitting the sudo key, or setting it to none or false,
+      results in no sudo rules being written for the user.
- ``system``: Optional. Create user as system user with no home directory.
Default: false
- ``uid``: Optional. The user's ID. Default: The next available value.
@@ -82,6 +83,9 @@ config keys for an entry in ``users`` are as follows:
users:
- default
+ # User explicitly omitted from sudo permission; also default behavior.
+ - name: <some_restricted_user>
+ sudo: false
- name: <username>
expiredate: <date>
gecos: <comment>
diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py
index ca7d0d5b..080a6d06 100644
--- a/cloudinit/config/schema.py
+++ b/cloudinit/config/schema.py
@@ -4,7 +4,7 @@
from __future__ import print_function
from cloudinit import importer
-from cloudinit.util import find_modules, read_file_or_url
+from cloudinit.util import find_modules, load_file
import argparse
from collections import defaultdict
@@ -93,20 +93,33 @@ def validate_cloudconfig_schema(config, schema, strict=False):
def annotated_cloudconfig_file(cloudconfig, original_content, schema_errors):
"""Return contents of the cloud-config file annotated with schema errors.
- @param cloudconfig: YAML-loaded object from the original_content.
+ @param cloudconfig: YAML-loaded dict from the original_content or empty
+ dict if unparseable.
@param original_content: The contents of a cloud-config file
@param schema_errors: List of tuples from a JSONSchemaValidationError. The
tuples consist of (schemapath, error_message).
"""
if not schema_errors:
return original_content
- schemapaths = _schemapath_for_cloudconfig(cloudconfig, original_content)
+ schemapaths = {}
+ if cloudconfig:
+ schemapaths = _schemapath_for_cloudconfig(
+ cloudconfig, original_content)
errors_by_line = defaultdict(list)
error_count = 1
error_footer = []
annotated_content = []
for path, msg in schema_errors:
- errors_by_line[schemapaths[path]].append(msg)
+ match = re.match(r'format-l(?P<line>\d+)\.c(?P<col>\d+).*', path)
+ if match:
+ line, col = match.groups()
+ errors_by_line[int(line)].append(msg)
+ else:
+ col = None
+ errors_by_line[schemapaths[path]].append(msg)
+ if col is not None:
+ msg = 'Line {line} column {col}: {msg}'.format(
+ line=line, col=col, msg=msg)
error_footer.append('# E{0}: {1}'.format(error_count, msg))
error_count += 1
lines = original_content.decode().split('\n')
@@ -139,21 +152,34 @@ def validate_cloudconfig_file(config_path, schema, annotate=False):
"""
if not os.path.exists(config_path):
raise RuntimeError('Configfile {0} does not exist'.format(config_path))
- content = read_file_or_url('file://{0}'.format(config_path)).contents
+ content = load_file(config_path, decode=False)
if not content.startswith(CLOUD_CONFIG_HEADER):
errors = (
- ('header', 'File {0} needs to begin with "{1}"'.format(
+ ('format-l1.c1', 'File {0} needs to begin with "{1}"'.format(
config_path, CLOUD_CONFIG_HEADER.decode())),)
- raise SchemaValidationError(errors)
-
+ error = SchemaValidationError(errors)
+ if annotate:
+ print(annotated_cloudconfig_file({}, content, error.schema_errors))
+ raise error
try:
cloudconfig = yaml.safe_load(content)
- except yaml.parser.ParserError as e:
- errors = (
- ('format', 'File {0} is not valid yaml. {1}'.format(
- config_path, str(e))),)
- raise SchemaValidationError(errors)
-
+ except (yaml.YAMLError) as e:
+ line = column = 1
+ mark = None
+ if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
+ mark = getattr(e, 'context_mark')
+ elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
+ mark = getattr(e, 'problem_mark')
+ if mark:
+ line = mark.line + 1
+ column = mark.column + 1
+ errors = (('format-l{line}.c{col}'.format(line=line, col=column),
+ 'File {0} is not valid yaml. {1}'.format(
+ config_path, str(e))),)
+ error = SchemaValidationError(errors)
+ if annotate:
+ print(annotated_cloudconfig_file({}, content, error.schema_errors))
+ raise error
try:
validate_cloudconfig_schema(
cloudconfig, schema, strict=True)
@@ -176,7 +202,7 @@ def _schemapath_for_cloudconfig(config, original_content):
list_index = 0
RE_YAML_INDENT = r'^(\s*)'
scopes = []
- for line_number, line in enumerate(content_lines):
+ for line_number, line in enumerate(content_lines, 1):
indent_depth = len(re.match(RE_YAML_INDENT, line).groups()[0])
line = line.strip()
if not line or line.startswith('#'):
@@ -208,8 +234,8 @@ def _schemapath_for_cloudconfig(config, original_content):
scopes.append((indent_depth + 2, key + '.0'))
for inner_list_index in range(0, len(yaml.safe_load(value))):
list_key = key + '.' + str(inner_list_index)
- schema_line_numbers[list_key] = line_number + 1
- schema_line_numbers[key] = line_number + 1
+ schema_line_numbers[list_key] = line_number
+ schema_line_numbers[key] = line_number
return schema_line_numbers
@@ -297,8 +323,8 @@ def get_schema():
configs_dir = os.path.dirname(os.path.abspath(__file__))
potential_handlers = find_modules(configs_dir)
- for (fname, mod_name) in potential_handlers.items():
- mod_locs, looked_locs = importer.find_module(
+ for (_fname, mod_name) in potential_handlers.items():
+ mod_locs, _looked_locs = importer.find_module(
mod_name, ['cloudinit.config'], ['schema'])
if mod_locs:
mod = importer.import_module(mod_locs[0])
@@ -337,9 +363,11 @@ def handle_schema_args(name, args):
try:
validate_cloudconfig_file(
args.config_file, full_schema, args.annotate)
- except (SchemaValidationError, RuntimeError) as e:
+ except SchemaValidationError as e:
if not args.annotate:
error(str(e))
+ except RuntimeError as e:
+ error(str(e))
else:
print("Valid cloud-config file {0}".format(args.config_file))
if args.doc:
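
The format-l<line>.c<col> path prefix introduced here lets --annotate work even when the YAML cannot be parsed at all. A short illustration of the convention (the file name and message are hypothetical):

    # A missing '#cloud-config' header is now reported at line 1, column 1:
    errors = (('format-l1.c1',
               'File test.yml needs to begin with "#cloud-config"'),)
    # annotated_cloudconfig_file({}, content, errors) then appends a
    # footer line such as:
    #   # E1: Line 1 column 1: File test.yml needs to begin with
    #         "#cloud-config"
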
diff --git a/cloudinit/config/tests/test_disable_ec2_metadata.py b/cloudinit/config/tests/test_disable_ec2_metadata.py
new file mode 100644
index 00000000..67646b03
--- /dev/null
+++ b/cloudinit/config/tests/test_disable_ec2_metadata.py
@@ -0,0 +1,50 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Tests cc_disable_ec2_metadata handler"""
+
+import cloudinit.config.cc_disable_ec2_metadata as ec2_meta
+
+from cloudinit.tests.helpers import CiTestCase, mock
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+DISABLE_CFG = {'disable_ec2_metadata': 'true'}
+
+
+class TestEC2MetadataRoute(CiTestCase):
+
+ with_logs = True
+
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ def test_disable_ifconfig(self, m_subp, m_which):
+ """Set the route if ifconfig command is available"""
+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+ m_subp.assert_called_with(
+ ['route', 'add', '-host', '169.254.169.254', 'reject'],
+ capture=False)
+
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ def test_disable_ip(self, m_subp, m_which):
+ """Set the route if ip command is available"""
+ m_which.side_effect = lambda x: x if x == 'ip' else None
+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+ m_subp.assert_called_with(
+ ['ip', 'route', 'add', 'prohibit', '169.254.169.254'],
+ capture=False)
+
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.which')
+ @mock.patch('cloudinit.config.cc_disable_ec2_metadata.util.subp')
+ def test_disable_no_tool(self, m_subp, m_which):
+        """Log error when neither ip nor ifconfig commands are available"""
+ m_which.return_value = None # Find neither ifconfig nor ip
+ ec2_meta.handle('foo', DISABLE_CFG, None, LOG, None)
+ self.assertEqual(
+ [mock.call('ip'), mock.call('ifconfig')], m_which.call_args_list)
+ m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py
new file mode 100644
index 00000000..b051ec82
--- /dev/null
+++ b/cloudinit/config/tests/test_set_passwords.py
@@ -0,0 +1,71 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import mock
+
+from cloudinit.config import cc_set_passwords as setpass
+from cloudinit.tests.helpers import CiTestCase
+from cloudinit import util
+
+MODPATH = "cloudinit.config.cc_set_passwords."
+
+
+class TestHandleSshPwauth(CiTestCase):
+ """Test cc_set_passwords handling of ssh_pwauth in handle_ssh_pwauth."""
+
+ with_logs = True
+
+ @mock.patch(MODPATH + "util.subp")
+ def test_unknown_value_logs_warning(self, m_subp):
+ setpass.handle_ssh_pwauth("floo")
+ self.assertIn("Unrecognized value: ssh_pwauth=floo",
+ self.logs.getvalue())
+ m_subp.assert_not_called()
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch(MODPATH + "util.subp")
+ def test_systemctl_as_service_cmd(self, m_subp, m_update_ssh_config):
+ """If systemctl in service cmd: systemctl restart name."""
+ setpass.handle_ssh_pwauth(
+ True, service_cmd=["systemctl"], service_name="myssh")
+ self.assertEqual(mock.call(["systemctl", "restart", "myssh"]),
+ m_subp.call_args)
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch(MODPATH + "util.subp")
+ def test_service_as_service_cmd(self, m_subp, m_update_ssh_config):
+        """If service in service cmd: service name restart."""
+ setpass.handle_ssh_pwauth(
+ True, service_cmd=["service"], service_name="myssh")
+ self.assertEqual(mock.call(["service", "myssh", "restart"]),
+ m_subp.call_args)
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=False)
+ @mock.patch(MODPATH + "util.subp")
+ def test_not_restarted_if_not_updated(self, m_subp, m_update_ssh_config):
+ """If config is not updated, then no system restart should be done."""
+ setpass.handle_ssh_pwauth(True)
+ m_subp.assert_not_called()
+ self.assertIn("No need to restart ssh", self.logs.getvalue())
+
+ @mock.patch(MODPATH + "update_ssh_config", return_value=True)
+ @mock.patch(MODPATH + "util.subp")
+ def test_unchanged_does_nothing(self, m_subp, m_update_ssh_config):
+ """If 'unchanged', then no updates to config and no restart."""
+ setpass.handle_ssh_pwauth(
+ "unchanged", service_cmd=["systemctl"], service_name="myssh")
+ m_update_ssh_config.assert_not_called()
+ m_subp.assert_not_called()
+
+ @mock.patch(MODPATH + "util.subp")
+ def test_valid_change_values(self, m_subp):
+        """If value is a valid change value, then update should be called."""
+ upname = MODPATH + "update_ssh_config"
+ optname = "PasswordAuthentication"
+ for value in util.FALSE_STRINGS + util.TRUE_STRINGS:
+ optval = "yes" if value in util.TRUE_STRINGS else "no"
+ with mock.patch(upname, return_value=False) as m_update:
+ setpass.handle_ssh_pwauth(value)
+ m_update.assert_called_with({optname: optval})
+ m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/config/tests/test_snap.py b/cloudinit/config/tests/test_snap.py
index c5b4a9de..34c80f1e 100644
--- a/cloudinit/config/tests/test_snap.py
+++ b/cloudinit/config/tests/test_snap.py
@@ -9,7 +9,7 @@ from cloudinit.config.cc_snap import (
from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import util
from cloudinit.tests.helpers import (
- CiTestCase, mock, wrap_and_call, skipUnlessJsonSchema)
+ CiTestCase, SchemaTestCaseMixin, mock, wrap_and_call, skipUnlessJsonSchema)
SYSTEM_USER_ASSERTION = """\
@@ -245,9 +245,10 @@ class TestRunCommands(CiTestCase):
@skipUnlessJsonSchema()
-class TestSchema(CiTestCase):
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
with_logs = True
+ schema = schema
def test_schema_warns_on_snap_not_as_dict(self):
"""If the snap configuration is not a dict, emit a warning."""
@@ -340,6 +341,30 @@ class TestSchema(CiTestCase):
{'snap': {'assertions': {'01': 'also valid'}}}, schema)
self.assertEqual('', self.logs.getvalue())
+ def test_duplicates_are_fine_array_array(self):
+ """Duplicated commands array/array entries are allowed."""
+ self.assertSchemaValid(
+            {'commands': [["echo", "bye"], ["echo", "bye"]]},
+            "command entries can be duplicates.")
+
+ def test_duplicates_are_fine_array_string(self):
+ """Duplicated commands array/string entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': ["echo bye", "echo bye"]},
+            "command entries can be duplicates.")
+
+ def test_duplicates_are_fine_dict_array(self):
+ """Duplicated commands dict/array entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
+            "command entries can be duplicates.")
+
+ def test_duplicates_are_fine_dict_string(self):
+ """Duplicated commands dict/string entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': {'00': "echo bye", '01': "echo bye"}},
+            "command entries can be duplicates.")
+
class TestHandle(CiTestCase):
diff --git a/cloudinit/config/tests/test_ubuntu_advantage.py b/cloudinit/config/tests/test_ubuntu_advantage.py
index f2a59faf..f1beeff8 100644
--- a/cloudinit/config/tests/test_ubuntu_advantage.py
+++ b/cloudinit/config/tests/test_ubuntu_advantage.py
@@ -7,7 +7,8 @@ from cloudinit.config.cc_ubuntu_advantage import (
handle, maybe_install_ua_tools, run_commands, schema)
from cloudinit.config.schema import validate_cloudconfig_schema
from cloudinit import util
-from cloudinit.tests.helpers import CiTestCase, mock, skipUnlessJsonSchema
+from cloudinit.tests.helpers import (
+ CiTestCase, mock, SchemaTestCaseMixin, skipUnlessJsonSchema)
# Module path used in mocks
@@ -105,9 +106,10 @@ class TestRunCommands(CiTestCase):
@skipUnlessJsonSchema()
-class TestSchema(CiTestCase):
+class TestSchema(CiTestCase, SchemaTestCaseMixin):
with_logs = True
+ schema = schema
def test_schema_warns_on_ubuntu_advantage_not_as_dict(self):
"""If ubuntu-advantage configuration is not a dict, emit a warning."""
@@ -169,6 +171,30 @@ class TestSchema(CiTestCase):
{'ubuntu-advantage': {'commands': {'01': 'also valid'}}}, schema)
self.assertEqual('', self.logs.getvalue())
+ def test_duplicates_are_fine_array_array(self):
+ """Duplicated commands array/array entries are allowed."""
+ self.assertSchemaValid(
+            {'commands': [["echo", "bye"], ["echo", "bye"]]},
+            "command entries can be duplicates.")
+
+ def test_duplicates_are_fine_array_string(self):
+ """Duplicated commands array/string entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': ["echo bye", "echo bye"]},
+            "command entries can be duplicates.")
+
+ def test_duplicates_are_fine_dict_array(self):
+ """Duplicated commands dict/array entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': {'00': ["echo", "bye"], '01': ["echo", "bye"]}},
+            "command entries can be duplicates.")
+
+ def test_duplicates_are_fine_dict_string(self):
+ """Duplicated commands dict/string entries are allowed."""
+ self.assertSchemaValid(
+ {'commands': {'00': "echo bye", '01': "echo bye"}},
+            "command entries can be duplicates.")
+
class TestHandle(CiTestCase):
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 55260eae..ab0b0776 100755
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -49,6 +49,9 @@ LOG = logging.getLogger(__name__)
# It could break when Amazon adds new regions and new AZs.
_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$')
+# Default NTP Client Configurations
+PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate']
+
@six.add_metaclass(abc.ABCMeta)
class Distro(object):
@@ -60,6 +63,7 @@ class Distro(object):
tz_zone_dir = "/usr/share/zoneinfo"
init_cmd = ['service'] # systemctl, service etc
renderer_configs = {}
+ _preferred_ntp_clients = None
def __init__(self, name, cfg, paths):
self._paths = paths
@@ -339,6 +343,14 @@ class Distro(object):
contents.write("%s\n" % (eh))
util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644)
+ @property
+ def preferred_ntp_clients(self):
+ """Allow distro to determine the preferred ntp client list"""
+ if not self._preferred_ntp_clients:
+ self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS)
+
+ return self._preferred_ntp_clients
+
def _bring_up_interface(self, device_name):
cmd = ['ifup', device_name]
LOG.debug("Attempting to run bring up interface %s using command %s",
@@ -519,7 +531,7 @@ class Distro(object):
self.lock_passwd(name)
# Configure sudo access
- if 'sudo' in kwargs:
+ if 'sudo' in kwargs and kwargs['sudo'] is not False:
self.write_sudo_rules(name, kwargs['sudo'])
# Import SSH keys
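
The preferred_ntp_clients property added above gives every distro a lazily-initialized, per-instance copy of PREFERRED_NTP_CLIENTS that subclasses may reorder. A hypothetical consumer (sketched here; the actual selection logic lives in the reworked cc_ntp.py from the diffstat) would simply take the first client installed on the system:

    def select_ntp_client(distro):
        # Hypothetical sketch: walk the distro's ordered preference list
        # and return the first client binary present on the system.
        for client in distro.preferred_ntp_clients:
            if util.which(client):
                return client
        return None
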
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 754d3df6..ff22d568 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -110,15 +110,15 @@ class Distro(distros.Distro):
if dev.startswith('lo'):
return dev
- n = re.search('\d+$', dev)
+ n = re.search(r'\d+$', dev)
index = n.group(0)
- (out, err) = util.subp(['ifconfig', '-a'])
+ (out, _err) = util.subp(['ifconfig', '-a'])
ifconfigoutput = [x for x in (out.strip()).splitlines()
if len(x.split()) > 0]
bsddev = 'NOT_FOUND'
for line in ifconfigoutput:
- m = re.match('^\w+', line)
+ m = re.match(r'^\w+', line)
if m:
if m.group(0).startswith('lo'):
continue
@@ -128,7 +128,7 @@ class Distro(distros.Distro):
break
# Replace the index with the one we're after.
- bsddev = re.sub('\d+$', index, bsddev)
+ bsddev = re.sub(r'\d+$', index, bsddev)
LOG.debug("Using network interface %s", bsddev)
return bsddev
@@ -266,7 +266,7 @@ class Distro(distros.Distro):
self.lock_passwd(name)
# Configure sudo access
- if 'sudo' in kwargs:
+ if 'sudo' in kwargs and kwargs['sudo'] is not False:
self.write_sudo_rules(name, kwargs['sudo'])
# Import SSH keys
diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py
index 162dfa05..9f90e95e 100644
--- a/cloudinit/distros/opensuse.py
+++ b/cloudinit/distros/opensuse.py
@@ -208,4 +208,28 @@ class Distro(distros.Distro):
nameservers, searchservers)
return dev_names
+ @property
+ def preferred_ntp_clients(self):
+        """The preferred ntp client list depends on the distro version."""
+ if not self._preferred_ntp_clients:
+ distro_info = util.system_info()['dist']
+ name = distro_info[0]
+ major_ver = int(distro_info[1].split('.')[0])
+
+ # This is horribly complicated because of a case of
+ # "we do not care if versions should be increasing syndrome"
+ if (
+ (major_ver >= 15 and 'openSUSE' not in name) or
+ (major_ver >= 15 and 'openSUSE' in name and major_ver != 42)
+ ):
+ self._preferred_ntp_clients = ['chrony',
+ 'systemd-timesyncd', 'ntp']
+ else:
+ self._preferred_ntp_clients = ['ntp',
+ 'systemd-timesyncd', 'chrony']
+
+ return self._preferred_ntp_clients
+
# vi: ts=4 expandtab
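
Because openSUSE Leap 42 predates Leap 15 in version number only, the check above cannot be a plain >= comparison. Worked outcomes for a few assumed util.system_info()['dist'] values:

    # ('SLES', '12.3', ...)          -> ['ntp', 'systemd-timesyncd', 'chrony']
    # ('SLES', '15.0', ...)          -> ['chrony', 'systemd-timesyncd', 'ntp']
    # ('openSUSE Leap', '42.3', ...) -> ['ntp', 'systemd-timesyncd', 'chrony']
    # ('openSUSE Leap', '15.0', ...) -> ['chrony', 'systemd-timesyncd', 'ntp']
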
diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py
index 82ca34f5..68154104 100644
--- a/cloudinit/distros/ubuntu.py
+++ b/cloudinit/distros/ubuntu.py
@@ -10,12 +10,31 @@
# This file is part of cloud-init. See LICENSE file for license information.
from cloudinit.distros import debian
+from cloudinit.distros import PREFERRED_NTP_CLIENTS
from cloudinit import log as logging
+from cloudinit import util
+
+import copy
LOG = logging.getLogger(__name__)
class Distro(debian.Distro):
+
+ @property
+ def preferred_ntp_clients(self):
+ """The preferred ntp client is dependent on the version."""
+ if not self._preferred_ntp_clients:
+ (_name, _version, codename) = util.system_info()['dist']
+ # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd.
+ if codename == "xenial" and not util.system_is_snappy():
+ self._preferred_ntp_clients = ['ntp']
+ else:
+ self._preferred_ntp_clients = (
+ copy.deepcopy(PREFERRED_NTP_CLIENTS))
+ return self._preferred_ntp_clients
+
pass
+
# vi: ts=4 expandtab
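
The Ubuntu override follows the same pattern; worked through with assumed codename values:

    # ('Ubuntu', '16.04', 'xenial'), not snappy -> ['ntp']
    # ('Ubuntu', '16.04', 'xenial'), snappy     -> PREFERRED_NTP_CLIENTS
    # ('Ubuntu', '18.04', 'bionic')             -> PREFERRED_NTP_CLIENTS
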
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index dc3f0fc3..3b7b17f1 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -150,11 +150,9 @@ def get_instance_userdata(api_version='latest',
# NOT_FOUND occurs) and just in that case returning an empty string.
exception_cb = functools.partial(_skip_retry_on_codes,
SKIP_USERDATA_CODES)
- response = util.read_file_or_url(ud_url,
- ssl_details=ssl_details,
- timeout=timeout,
- retries=retries,
- exception_cb=exception_cb)
+ response = url_helper.read_file_or_url(
+ ud_url, ssl_details=ssl_details, timeout=timeout,
+ retries=retries, exception_cb=exception_cb)
user_data = response.contents
except url_helper.UrlError as e:
if e.code not in SKIP_USERDATA_CODES:
@@ -169,9 +167,9 @@ def _get_instance_metadata(tree, api_version='latest',
ssl_details=None, timeout=5, retries=5,
leaf_decoder=None):
md_url = url_helper.combine_url(metadata_address, api_version, tree)
- caller = functools.partial(util.read_file_or_url,
- ssl_details=ssl_details, timeout=timeout,
- retries=retries)
+ caller = functools.partial(
+ url_helper.read_file_or_url, ssl_details=ssl_details,
+ timeout=timeout, retries=retries)
def mcaller(url):
return caller(url).contents
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 1ca92d4b..dc338769 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -97,7 +97,7 @@ def _has_suitable_upstart():
else:
util.logexc(LOG, "dpkg --compare-versions failed [%s]",
e.exit_code)
- except Exception as e:
+ except Exception:
util.logexc(LOG, "dpkg --compare-versions failed")
return False
else:
diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py
index f69c0ef2..3ffde52c 100644
--- a/cloudinit/net/__init__.py
+++ b/cloudinit/net/__init__.py
@@ -107,6 +107,21 @@ def is_bond(devname):
return os.path.exists(sys_dev_path(devname, "bonding"))
+def is_renamed(devname):
+ """
+ /* interface name assignment types (sysfs name_assign_type attribute) */
+ #define NET_NAME_UNKNOWN 0 /* unknown origin (not exposed to user) */
+ #define NET_NAME_ENUM 1 /* enumerated by kernel */
+ #define NET_NAME_PREDICTABLE 2 /* predictably named by the kernel */
+ #define NET_NAME_USER 3 /* provided by user-space */
+ #define NET_NAME_RENAMED 4 /* renamed by user-space */
+ """
+ name_assign_type = read_sys_net_safe(devname, 'name_assign_type')
+ if name_assign_type and name_assign_type in ['3', '4']:
+ return True
+ return False
+
+
def is_vlan(devname):
uevent = str(read_sys_net_safe(devname, "uevent"))
return 'DEVTYPE=vlan' in uevent.splitlines()
@@ -180,6 +195,17 @@ def find_fallback_nic(blacklist_drivers=None):
if not blacklist_drivers:
blacklist_drivers = []
+ if 'net.ifnames=0' in util.get_cmdline():
+ LOG.debug('Stable ifnames disabled by net.ifnames=0 in /proc/cmdline')
+ else:
+ unstable = [device for device in get_devicelist()
+ if device != 'lo' and not is_renamed(device)]
+ if len(unstable):
+ LOG.debug('Found unstable nic names: %s; calling udevadm settle',
+ unstable)
+ msg = 'Waiting for udev events to settle'
+ util.log_time(LOG.debug, msg, func=util.udevadm_settle)
+
# get list of interfaces that could have connections
invalid_interfaces = set(['lo'])
potential_interfaces = set([device for device in get_devicelist()
@@ -295,7 +321,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True):
def _version_2(netcfg):
renames = []
- for key, ent in netcfg.get('ethernets', {}).items():
+ for ent in netcfg.get('ethernets', {}).values():
# only rename if configured to do so
name = ent.get('set-name')
if not name:
@@ -333,8 +359,12 @@ def interface_has_own_mac(ifname, strict=False):
1: randomly generated 3: set using dev_set_mac_address"""
assign_type = read_sys_net_int(ifname, "addr_assign_type")
- if strict and assign_type is None:
- raise ValueError("%s had no addr_assign_type.")
+ if assign_type is None:
+ # None is returned if this nic had no 'addr_assign_type' entry.
+ # if strict, raise an error, if not return True.
+ if strict:
+            raise ValueError("%s had no addr_assign_type." % ifname)
+ return True
return assign_type in (0, 1, 3)
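
Taken together, is_renamed and the new block in find_fallback_nic mean fallback-nic detection now waits for udev whenever a non-loopback device still carries a kernel-assigned name. A condensed sketch of that decision (device names are hypothetical):

    # name_assign_type 3 (user-space) or 4 (renamed) counts as stable;
    # 0, 1, 2 or a missing attribute means udev may still rename the nic.
    unstable = [dev for dev in get_devicelist()
                if dev != 'lo' and not is_renamed(dev)]
    if unstable:  # e.g. ['eth0'] fresh from the kernel
        util.log_time(LOG.debug, 'Waiting for udev events to settle',
                      func=util.udevadm_settle)
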
diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py
index 9e9fe0fe..f89a0f73 100755
--- a/cloudinit/net/cmdline.py
+++ b/cloudinit/net/cmdline.py
@@ -65,7 +65,7 @@ def _klibc_to_config_entry(content, mac_addrs=None):
iface['mac_address'] = mac_addrs[name]
# Handle both IPv4 and IPv6 values
- for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')):
+ for pre in ('IPV4', 'IPV6'):
# if no IPV4ADDR or IPV6ADDR, then go on.
if pre + "ADDR" not in data:
continue
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 087c0c03..12cf5097 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -216,7 +216,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None):
if leases_d is None:
leases_d = NETWORKD_LEASES_DIR
leases = networkd_load_leases(leases_d=leases_d)
- for ifindex, data in sorted(leases.items()):
+ for _ifindex, data in sorted(leases.items()):
if data.get(keyname):
return data[keyname]
return None
diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py
index c6a71d16..bd20a361 100644
--- a/cloudinit/net/eni.py
+++ b/cloudinit/net/eni.py
@@ -10,9 +10,12 @@ from . import ParserError
from . import renderer
from .network_state import subnet_is_ipv6
+from cloudinit import log as logging
from cloudinit import util
+LOG = logging.getLogger(__name__)
+
NET_CONFIG_COMMANDS = [
"pre-up", "up", "post-up", "down", "pre-down", "post-down",
]
@@ -61,7 +64,7 @@ def _iface_add_subnet(iface, subnet):
# TODO: switch to valid_map for attrs
-def _iface_add_attrs(iface, index):
+def _iface_add_attrs(iface, index, ipv4_subnet_mtu):
# If the index is non-zero, this is an alias interface. Alias interfaces
# represent additional interface addresses, and should not have additional
# attributes. (extra attributes here are almost always either incorrect,
@@ -100,6 +103,13 @@ def _iface_add_attrs(iface, index):
value = 'on' if iface[key] else 'off'
if not value or key in ignore_map:
continue
+ if key == 'mtu' and ipv4_subnet_mtu:
+ if value != ipv4_subnet_mtu:
+ LOG.warning(
+ "Network config: ignoring %s device-level mtu:%s because"
+ " ipv4 subnet-level mtu:%s provided.",
+ iface['name'], value, ipv4_subnet_mtu)
+ continue
if key in multiline_keys:
for v in value:
content.append(" {0} {1}".format(renames.get(key, key), v))
@@ -377,12 +387,15 @@ class Renderer(renderer.Renderer):
subnets = iface.get('subnets', {})
if subnets:
for index, subnet in enumerate(subnets):
+ ipv4_subnet_mtu = None
iface['index'] = index
iface['mode'] = subnet['type']
iface['control'] = subnet.get('control', 'auto')
subnet_inet = 'inet'
if subnet_is_ipv6(subnet):
subnet_inet += '6'
+ else:
+ ipv4_subnet_mtu = subnet.get('mtu')
iface['inet'] = subnet_inet
if subnet['type'].startswith('dhcp'):
iface['mode'] = 'dhcp'
@@ -397,7 +410,7 @@ class Renderer(renderer.Renderer):
_iface_start_entry(
iface, index, render_hwaddress=render_hwaddress) +
_iface_add_subnet(iface, subnet) +
- _iface_add_attrs(iface, index)
+ _iface_add_attrs(iface, index, ipv4_subnet_mtu)
)
for route in subnet.get('routes', []):
lines.extend(self._render_route(route, indent=" "))
@@ -409,7 +422,8 @@ class Renderer(renderer.Renderer):
if 'bond-master' in iface or 'bond-slaves' in iface:
lines.append("auto {name}".format(**iface))
lines.append("iface {name} {inet} {mode}".format(**iface))
- lines.extend(_iface_add_attrs(iface, index=0))
+ lines.extend(
+ _iface_add_attrs(iface, index=0, ipv4_subnet_mtu=None))
sections.append(lines)
return sections
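
The new ipv4_subnet_mtu argument enforces a precedence rule: an MTU set on an ipv4 subnet wins over one set on the device. A hypothetical network-state fragment showing what the warning above fires on:

    iface = {
        'name': 'eth0',
        'mtu': 9000,      # device-level value, ignored with a warning
        'subnets': [{'type': 'static',
                     'address': '10.0.0.5/24',
                     'mtu': 1400}],  # subnet-level value, rendered
    }
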
diff --git a/cloudinit/net/netplan.py b/cloudinit/net/netplan.py
index 63443484..40143634 100644
--- a/cloudinit/net/netplan.py
+++ b/cloudinit/net/netplan.py
@@ -34,7 +34,7 @@ def _get_params_dict_by_match(config, match):
if key.startswith(match))
-def _extract_addresses(config, entry):
+def _extract_addresses(config, entry, ifname):
"""This method parse a cloudinit.net.network_state dictionary (config) and
maps netstate keys/values into a dictionary (entry) to represent
netplan yaml.
@@ -124,6 +124,15 @@ def _extract_addresses(config, entry):
addresses.append(addr)
+ if 'mtu' in config:
+ entry_mtu = entry.get('mtu')
+ if entry_mtu and config['mtu'] != entry_mtu:
+ LOG.warning(
+ "Network config: ignoring %s device-level mtu:%s because"
+ " ipv4 subnet-level mtu:%s provided.",
+ ifname, config['mtu'], entry_mtu)
+ else:
+ entry['mtu'] = config['mtu']
if len(addresses) > 0:
entry.update({'addresses': addresses})
if len(routes) > 0:
@@ -262,10 +271,7 @@ class Renderer(renderer.Renderer):
else:
del eth['match']
del eth['set-name']
- if 'mtu' in ifcfg:
- eth['mtu'] = ifcfg.get('mtu')
-
- _extract_addresses(ifcfg, eth)
+ _extract_addresses(ifcfg, eth, ifname)
ethernets.update({ifname: eth})
elif if_type == 'bond':
@@ -288,7 +294,7 @@ class Renderer(renderer.Renderer):
slave_interfaces = ifcfg.get('bond-slaves')
if slave_interfaces == 'none':
_extract_bond_slaves_by_name(interfaces, bond, ifname)
- _extract_addresses(ifcfg, bond)
+ _extract_addresses(ifcfg, bond, ifname)
bonds.update({ifname: bond})
elif if_type == 'bridge':
@@ -321,7 +327,7 @@ class Renderer(renderer.Renderer):
if len(br_config) > 0:
bridge.update({'parameters': br_config})
- _extract_addresses(ifcfg, bridge)
+ _extract_addresses(ifcfg, bridge, ifname)
bridges.update({ifname: bridge})
elif if_type == 'vlan':
@@ -333,7 +339,7 @@ class Renderer(renderer.Renderer):
macaddr = ifcfg.get('mac_address', None)
if macaddr is not None:
vlan['macaddress'] = macaddr.lower()
- _extract_addresses(ifcfg, vlan)
+ _extract_addresses(ifcfg, vlan, ifname)
vlans.update({ifname: vlan})
# inject global nameserver values under each all interface which
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
index 6d63e5c5..72c803eb 100644
--- a/cloudinit/net/network_state.py
+++ b/cloudinit/net/network_state.py
@@ -7,6 +7,8 @@
import copy
import functools
import logging
+import socket
+import struct
import six
@@ -886,12 +888,9 @@ def net_prefix_to_ipv4_mask(prefix):
This is the inverse of ipv4_mask_to_net_prefix.
24 -> "255.255.255.0"
Also supports input as a string."""
-
- mask = [0, 0, 0, 0]
- for i in list(range(0, int(prefix))):
- idx = int(i / 8)
- mask[idx] = mask[idx] + (1 << (7 - i % 8))
- return ".".join([str(x) for x in mask])
+ mask = socket.inet_ntoa(
+ struct.pack(">I", (0xffffffff << (32 - int(prefix)) & 0xffffffff)))
+ return mask
def ipv4_mask_to_net_prefix(mask):
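
The loop-based mask construction is replaced by one shift-and-pack expression. Worked through for prefix 24:

    import socket
    import struct

    # 0xffffffff << (32 - 24) == 0xffffffff00; masking with 0xffffffff
    # keeps the low 32 bits, 0xffffff00. Packed big-endian that is
    # b'\xff\xff\xff\x00', which inet_ntoa renders as a dotted quad.
    mask = socket.inet_ntoa(
        struct.pack(">I", (0xffffffff << (32 - 24)) & 0xffffffff))
    assert mask == "255.255.255.0"
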
diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py
index 39d89c46..3d719238 100644
--- a/cloudinit/net/sysconfig.py
+++ b/cloudinit/net/sysconfig.py
@@ -287,7 +287,6 @@ class Renderer(renderer.Renderer):
if subnet_type == 'dhcp6':
iface_cfg['IPV6INIT'] = True
iface_cfg['DHCPV6C'] = True
- iface_cfg['BOOTPROTO'] = 'dhcp'
elif subnet_type in ['dhcp4', 'dhcp']:
iface_cfg['BOOTPROTO'] = 'dhcp'
elif subnet_type == 'static':
@@ -305,6 +304,13 @@ class Renderer(renderer.Renderer):
mtu_key = 'IPV6_MTU'
iface_cfg['IPV6INIT'] = True
if 'mtu' in subnet:
+ mtu_mismatch = bool(mtu_key in iface_cfg and
+ subnet['mtu'] != iface_cfg[mtu_key])
+ if mtu_mismatch:
+ LOG.warning(
+ 'Network config: ignoring %s device-level mtu:%s'
+ ' because ipv4 subnet-level mtu:%s provided.',
+ iface_cfg.name, iface_cfg[mtu_key], subnet['mtu'])
iface_cfg[mtu_key] = subnet['mtu']
elif subnet_type == 'manual':
# If the subnet has an MTU setting, then ONBOOT=True
@@ -364,7 +370,7 @@ class Renderer(renderer.Renderer):
@classmethod
def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets):
- for i, subnet in enumerate(subnets, start=len(iface_cfg.children)):
+ for _, subnet in enumerate(subnets, start=len(iface_cfg.children)):
for route in subnet.get('routes', []):
is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway'])
diff --git a/cloudinit/net/tests/test_init.py b/cloudinit/net/tests/test_init.py
index 276556ee..5c017d15 100644
--- a/cloudinit/net/tests/test_init.py
+++ b/cloudinit/net/tests/test_init.py
@@ -199,6 +199,7 @@ class TestGenerateFallbackConfig(CiTestCase):
self.sysdir = self.tmp_dir() + '/'
self.m_sys_path.return_value = self.sysdir
self.addCleanup(sys_mock.stop)
+ self.add_patch('cloudinit.net.util.udevadm_settle', 'm_settle')
def test_generate_fallback_finds_connected_eth_with_mac(self):
"""generate_fallback_config finds any connected device with a mac."""
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 993b26cf..9ff929c2 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -8,9 +8,11 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+from copy import copy, deepcopy
import re
from cloudinit import log as logging
+from cloudinit.net.network_state import net_prefix_to_ipv4_mask
from cloudinit import util
from cloudinit.simpletable import SimpleTable
@@ -18,18 +20,90 @@ from cloudinit.simpletable import SimpleTable
LOG = logging.getLogger()
-def netdev_info(empty=""):
- fields = ("hwaddr", "addr", "bcast", "mask")
- (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+DEFAULT_NETDEV_INFO = {
+ "ipv4": [],
+ "ipv6": [],
+ "hwaddr": "",
+ "up": False
+}
+
+
+def _netdev_info_iproute(ipaddr_out):
+ """
+    Get network device dicts from 'ip addr show' output.
+
+ @param ipaddr_out: Output string from 'ip addr show' command.
+
+ @returns: A dict of device info keyed by network device name containing
+ device configuration values.
+ @raise: TypeError if ipaddr_out isn't a string.
+ """
+ devs = {}
+ dev_name = None
+ for num, line in enumerate(ipaddr_out.splitlines()):
+ m = re.match(r'^\d+:\s(?P<dev>[^:]+):\s+<(?P<flags>\S+)>\s+.*', line)
+ if m:
+ dev_name = m.group('dev').lower().split('@')[0]
+ flags = m.group('flags').split(',')
+ devs[dev_name] = {
+ 'ipv4': [], 'ipv6': [], 'hwaddr': '',
+ 'up': bool('UP' in flags and 'LOWER_UP' in flags),
+ }
+ elif 'inet6' in line:
+ m = re.match(
+ r'\s+inet6\s(?P<ip>\S+)\sscope\s(?P<scope6>\S+).*', line)
+ if not m:
+ LOG.warning(
+ 'Could not parse ip addr show: (line:%d) %s', num, line)
+ continue
+ devs[dev_name]['ipv6'].append(m.groupdict())
+ elif 'inet' in line:
+ m = re.match(
+ r'\s+inet\s(?P<cidr4>\S+)(\sbrd\s(?P<bcast>\S+))?\sscope\s'
+ r'(?P<scope>\S+).*', line)
+ if not m:
+ LOG.warning(
+ 'Could not parse ip addr show: (line:%d) %s', num, line)
+ continue
+ match = m.groupdict()
+ cidr4 = match.pop('cidr4')
+ addr, _, prefix = cidr4.partition('/')
+ if not prefix:
+ prefix = '32'
+ devs[dev_name]['ipv4'].append({
+ 'ip': addr,
+ 'bcast': match['bcast'] if match['bcast'] else '',
+ 'mask': net_prefix_to_ipv4_mask(prefix),
+ 'scope': match['scope']})
+ elif 'link' in line:
+ m = re.match(
+ r'\s+link/(?P<link_type>\S+)\s(?P<hwaddr>\S+).*', line)
+ if not m:
+ LOG.warning(
+ 'Could not parse ip addr show: (line:%d) %s', num, line)
+ continue
+ if m.group('link_type') == 'ether':
+ devs[dev_name]['hwaddr'] = m.group('hwaddr')
+ else:
+ devs[dev_name]['hwaddr'] = ''
+ else:
+ continue
+ return devs
+
+
+def _netdev_info_ifconfig(ifconfig_data):
+ # fields that need to be returned in devs for each dev
devs = {}
- for line in str(ifcfg_out).splitlines():
+ for line in ifconfig_data.splitlines():
if len(line) == 0:
continue
if line[0] not in ("\t", " "):
curdev = line.split()[0]
- devs[curdev] = {"up": False}
- for field in fields:
- devs[curdev][field] = ""
+            # current ifconfig appends a ':' to the device name
+ if curdev.endswith(':'):
+ curdev = curdev[:-1]
+ if curdev not in devs:
+ devs[curdev] = deepcopy(DEFAULT_NETDEV_INFO)
toks = line.lower().strip().split()
if toks[0] == "up":
devs[curdev]['up'] = True
@@ -39,59 +113,164 @@ def netdev_info(empty=""):
if re.search(r"flags=\d+<up,", toks[1]):
devs[curdev]['up'] = True
- fieldpost = ""
- if toks[0] == "inet6":
- fieldpost = "6"
-
for i in range(len(toks)):
- # older net-tools (ubuntu) show 'inet addr:xx.yy',
- # newer (freebsd and fedora) show 'inet xx.yy'
- # just skip this 'inet' entry. (LP: #1285185)
- try:
- if ((toks[i] in ("inet", "inet6") and
- toks[i + 1].startswith("addr:"))):
- continue
- except IndexError:
- pass
-
- # Couple the different items we're interested in with the correct
- # field since FreeBSD/CentOS/Fedora differ in the output.
- ifconfigfields = {
- "addr:": "addr", "inet": "addr",
- "bcast:": "bcast", "broadcast": "bcast",
- "mask:": "mask", "netmask": "mask",
- "hwaddr": "hwaddr", "ether": "hwaddr",
- "scope": "scope",
- }
- for origfield, field in ifconfigfields.items():
- target = "%s%s" % (field, fieldpost)
- if devs[curdev].get(target, ""):
- continue
- if toks[i] == "%s" % origfield:
- try:
- devs[curdev][target] = toks[i + 1]
- except IndexError:
- pass
- elif toks[i].startswith("%s" % origfield):
- devs[curdev][target] = toks[i][len(field) + 1:]
-
- if empty != "":
- for (_devname, dev) in devs.items():
- for field in dev:
- if dev[field] == "":
- dev[field] = empty
+ if toks[i] == "inet": # Create new ipv4 addr entry
+ devs[curdev]['ipv4'].append(
+ {'ip': toks[i + 1].lstrip("addr:")})
+ elif toks[i].startswith("bcast:"):
+ devs[curdev]['ipv4'][-1]['bcast'] = toks[i].lstrip("bcast:")
+ elif toks[i] == "broadcast":
+ devs[curdev]['ipv4'][-1]['bcast'] = toks[i + 1]
+ elif toks[i].startswith("mask:"):
+ devs[curdev]['ipv4'][-1]['mask'] = toks[i].lstrip("mask:")
+ elif toks[i] == "netmask":
+ devs[curdev]['ipv4'][-1]['mask'] = toks[i + 1]
+ elif toks[i] == "hwaddr" or toks[i] == "ether":
+ devs[curdev]['hwaddr'] = toks[i + 1]
+ elif toks[i] == "inet6":
+ if toks[i + 1] == "addr:":
+ devs[curdev]['ipv6'].append({'ip': toks[i + 2]})
+ else:
+ devs[curdev]['ipv6'].append({'ip': toks[i + 1]})
+ elif toks[i] == "prefixlen": # Add prefix to current ipv6 value
+ addr6 = devs[curdev]['ipv6'][-1]['ip'] + "/" + toks[i + 1]
+ devs[curdev]['ipv6'][-1]['ip'] = addr6
+ elif toks[i].startswith("scope:"):
+ devs[curdev]['ipv6'][-1]['scope6'] = toks[i].lstrip("scope:")
+ elif toks[i] == "scopeid":
+ res = re.match(r'.*<(\S+)>', toks[i + 1])
+ if res:
+ devs[curdev]['ipv6'][-1]['scope6'] = res.group(1)
+ return devs
+
+
+def netdev_info(empty=""):
+ devs = {}
+ if util.which('ip'):
+ # Try iproute first of all
+ (ipaddr_out, _err) = util.subp(["ip", "addr", "show"])
+ devs = _netdev_info_iproute(ipaddr_out)
+ elif util.which('ifconfig'):
+ # Fall back to net-tools if iproute2 is not present
+ (ifcfg_out, _err) = util.subp(["ifconfig", "-a"], rcs=[0, 1])
+ devs = _netdev_info_ifconfig(ifcfg_out)
+ else:
+ LOG.warning(
+ "Could not print networks: missing 'ip' and 'ifconfig' commands")
+ if empty == "":
+ return devs
+
+ recurse_types = (dict, tuple, list)
+
+ def fill(data, new_val="", empty_vals=("", b"")):
+ """Recursively replace 'empty_vals' in data (dict, tuple, list)
+ with new_val"""
+ if isinstance(data, dict):
+ myiter = data.items()
+ elif isinstance(data, (tuple, list)):
+ myiter = enumerate(data)
+ else:
+ raise TypeError("Unexpected input to fill")
+
+ for key, val in myiter:
+ if val in empty_vals:
+ data[key] = new_val
+ elif isinstance(val, recurse_types):
+ fill(val, new_val)
+
+ fill(devs, new_val=empty)
return devs
-def route_info():
- (route_out, _err) = util.subp(["netstat", "-rn"], rcs=[0, 1])
+def _netdev_route_info_iproute(iproute_data):
+ """
+ Get network route dicts from ip route info.
+
+ @param iproute_data: Output string from ip route command.
+
+ @returns: A dict containing ipv4 and ipv6 route entries as lists. Each
+ item in the list is a route dictionary representing destination,
+ gateway, flags, genmask and interface information.
+ """
+
+ routes = {}
+ routes['ipv4'] = []
+ routes['ipv6'] = []
+ entries = iproute_data.splitlines()
+ default_route_entry = {
+ 'destination': '', 'flags': '', 'gateway': '', 'genmask': '',
+ 'iface': '', 'metric': ''}
+ for line in entries:
+ entry = copy(default_route_entry)
+ if not line:
+ continue
+ toks = line.split()
+ flags = ['U']
+ if toks[0] == "default":
+ entry['destination'] = "0.0.0.0"
+ entry['genmask'] = "0.0.0.0"
+ else:
+ if '/' in toks[0]:
+ (addr, cidr) = toks[0].split("/")
+ else:
+ addr = toks[0]
+ cidr = '32'
+ flags.append("H")
+ entry['destination'] = addr
+ entry['genmask'] = net_prefix_to_ipv4_mask(cidr)
+ entry['gateway'] = "0.0.0.0"
+ for i in range(len(toks)):
+ if toks[i] == "via":
+ entry['gateway'] = toks[i + 1]
+ flags.insert(1, "G")
+ if toks[i] == "dev":
+ entry["iface"] = toks[i + 1]
+ if toks[i] == "metric":
+ entry['metric'] = toks[i + 1]
+ entry['flags'] = ''.join(flags)
+ routes['ipv4'].append(entry)
+ try:
+ (iproute_data6, _err6) = util.subp(
+ ["ip", "--oneline", "-6", "route", "list", "table", "all"],
+ rcs=[0, 1])
+ except util.ProcessExecutionError:
+ pass
+ else:
+ entries6 = iproute_data6.splitlines()
+ for line in entries6:
+ entry = {}
+ if not line:
+ continue
+ toks = line.split()
+ if toks[0] == "default":
+ entry['destination'] = "::/0"
+ entry['flags'] = "UG"
+ else:
+ entry['destination'] = toks[0]
+ entry['gateway'] = "::"
+ entry['flags'] = "U"
+ for i in range(len(toks)):
+ if toks[i] == "via":
+ entry['gateway'] = toks[i + 1]
+ entry['flags'] = "UG"
+ if toks[i] == "dev":
+ entry["iface"] = toks[i + 1]
+ if toks[i] == "metric":
+ entry['metric'] = toks[i + 1]
+ if toks[i] == "expires":
+ entry['flags'] = entry['flags'] + 'e'
+ routes['ipv6'].append(entry)
+ return routes
+
+def _netdev_route_info_netstat(route_data):
routes = {}
routes['ipv4'] = []
routes['ipv6'] = []
- entries = route_out.splitlines()[1:]
+ entries = route_data.splitlines()
for line in entries:
if not line:
continue
@@ -101,8 +280,8 @@ def route_info():
# default 10.65.0.1 UGS 0 34920 vtnet0
#
# Linux netstat shows 2 more:
- # Destination Gateway Genmask Flags MSS Window irtt Iface
- # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
+ # Destination Gateway Genmask Flags Metric Ref Use Iface
+ # 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0
if (len(toks) < 6 or toks[0] == "Kernel" or
toks[0] == "Destination" or toks[0] == "Internet" or
toks[0] == "Internet6" or toks[0] == "Routing"):
@@ -125,31 +304,57 @@ def route_info():
routes['ipv4'].append(entry)
try:
- (route_out6, _err6) = util.subp(["netstat", "-A", "inet6", "-n"],
- rcs=[0, 1])
+ (route_data6, _err6) = util.subp(
+ ["netstat", "-A", "inet6", "--route", "--numeric"], rcs=[0, 1])
except util.ProcessExecutionError:
pass
else:
- entries6 = route_out6.splitlines()[1:]
+ entries6 = route_data6.splitlines()
for line in entries6:
if not line:
continue
toks = line.split()
- if (len(toks) < 6 or toks[0] == "Kernel" or
+ if (len(toks) < 7 or toks[0] == "Kernel" or
+ toks[0] == "Destination" or toks[0] == "Internet" or
toks[0] == "Proto" or toks[0] == "Active"):
continue
entry = {
- 'proto': toks[0],
- 'recv-q': toks[1],
- 'send-q': toks[2],
- 'local address': toks[3],
- 'foreign address': toks[4],
- 'state': toks[5],
+ 'destination': toks[0],
+ 'gateway': toks[1],
+ 'flags': toks[2],
+ 'metric': toks[3],
+ 'ref': toks[4],
+ 'use': toks[5],
+ 'iface': toks[6],
}
+ # skip lo interface on ipv6
+ if entry['iface'] == "lo":
+ continue
+ # strip /128 from address if it's included
+ if entry['destination'].endswith('/128'):
+ entry['destination'] = re.sub(
+ r'\/128$', '', entry['destination'])
routes['ipv6'].append(entry)
return routes
+def route_info():
+ routes = {}
+ if util.which('ip'):
+ # Try iproute first of all
+ (iproute_out, _err) = util.subp(["ip", "-o", "route", "list"])
+ routes = _netdev_route_info_iproute(iproute_out)
+ elif util.which('netstat'):
+ # Fall back to net-tools if iproute2 is not present
+ (route_out, _err) = util.subp(
+ ["netstat", "--route", "--numeric", "--extend"], rcs=[0, 1])
+ routes = _netdev_route_info_netstat(route_out)
+ else:
+ LOG.warning(
+ "Could not print routes: missing 'ip' and 'netstat' commands")
+ return routes
+
+
def getgateway():
try:
routes = route_info()
@@ -164,23 +369,36 @@ def getgateway():
def netdev_pformat():
lines = []
+ empty = "."
try:
- netdev = netdev_info(empty=".")
- except Exception:
- lines.append(util.center("Net device info failed", '!', 80))
+ netdev = netdev_info(empty=empty)
+ except Exception as e:
+ lines.append(
+ util.center(
+ "Net device info failed ({error})".format(error=str(e)),
+ '!', 80))
else:
+ if not netdev:
+ return '\n'
fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
tbl = SimpleTable(fields)
- for (dev, d) in sorted(netdev.items()):
- tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
- if d.get('addr6'):
- tbl.add_row([dev, d["up"],
- d["addr6"], ".", d.get("scope6"), d["hwaddr"]])
+ for (dev, data) in sorted(netdev.items()):
+ for addr in data.get('ipv4'):
+ tbl.add_row(
+ (dev, data["up"], addr["ip"], addr["mask"],
+ addr.get('scope', empty), data["hwaddr"]))
+ for addr in data.get('ipv6'):
+ tbl.add_row(
+ (dev, data["up"], addr["ip"], empty, addr["scope6"],
+ data["hwaddr"]))
+ if len(data.get('ipv6')) + len(data.get('ipv4')) == 0:
+ tbl.add_row((dev, data["up"], empty, empty, empty,
+ data["hwaddr"]))
netdev_s = tbl.get_string()
max_len = len(max(netdev_s.splitlines(), key=len))
header = util.center("Net device info", "+", max_len)
lines.extend([header, netdev_s])
- return "\n".join(lines)
+ return "\n".join(lines) + "\n"
def route_pformat():
@@ -188,7 +406,10 @@ def route_pformat():
try:
routes = route_info()
except Exception as e:
- lines.append(util.center('Route info failed', '!', 80))
+ lines.append(
+ util.center(
+ 'Route info failed ({error})'.format(error=str(e)),
+ '!', 80))
util.logexc(LOG, "Route info failed: %s" % e)
else:
if routes.get('ipv4'):
@@ -205,20 +426,20 @@ def route_pformat():
header = util.center("Route IPv4 info", "+", max_len)
lines.extend([header, route_s])
if routes.get('ipv6'):
- fields_v6 = ['Route', 'Proto', 'Recv-Q', 'Send-Q',
- 'Local Address', 'Foreign Address', 'State']
+ fields_v6 = ['Route', 'Destination', 'Gateway', 'Interface',
+ 'Flags']
tbl_v6 = SimpleTable(fields_v6)
for (n, r) in enumerate(routes.get('ipv6')):
route_id = str(n)
- tbl_v6.add_row([route_id, r['proto'],
- r['recv-q'], r['send-q'],
- r['local address'], r['foreign address'],
- r['state']])
+ if r['iface'] == 'lo':
+ continue
+ tbl_v6.add_row([route_id, r['destination'],
+ r['gateway'], r['iface'], r['flags']])
route_s = tbl_v6.get_string()
max_len = len(max(route_s.splitlines(), key=len))
header = util.center("Route IPv6 info", "+", max_len)
lines.extend([header, route_s])
- return "\n".join(lines)
+ return "\n".join(lines) + "\n"
def debug_info(prefix='ci-info: '):
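
The rewritten netdev_info now returns one nested dict per device instead of the old flat addr/addr6 fields, which is what lets netdev_pformat print one table row per address. Roughly the shape it produces (all values here are made up):

    {'eth0': {'up': True,
              'hwaddr': '00:16:3e:16:db:54',
              'ipv4': [{'ip': '10.85.130.116',
                        'bcast': '10.85.130.255',
                        'mask': '255.255.255.0',
                        'scope': 'global'}],
              'ipv6': [{'ip': 'fe80::216:3eff:fe16:db54/64',
                        'scope6': 'link'}]}}
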
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index 4f62d2f9..e5dfab33 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -192,7 +192,7 @@ class ReportEventStack(object):
def _childrens_finish_info(self):
for cand_result in (status.FAIL, status.WARN):
- for name, (value, msg) in self.children.items():
+ for _name, (value, _msg) in self.children.items():
if value == cand_result:
return (value, self.message)
return (self.result, self.message)
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 22279d09..858e0827 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -45,7 +45,7 @@ def _is_aliyun():
def parse_public_keys(public_keys):
keys = []
- for key_id, key_body in public_keys.items():
+ for _key_id, key_body in public_keys.items():
if isinstance(key_body, str):
keys.append(key_body.strip())
elif isinstance(key_body, list):
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index e1d0055b..24fd65ff 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -29,7 +29,6 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
# Shell command lists
CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['udevadm', 'settle', '--timeout=5']
META_DATA_NOT_SUPPORTED = {
'block-device-mapping': {},
@@ -185,26 +184,24 @@ class DataSourceAltCloud(sources.DataSource):
cmd = CMD_PROBE_FLOPPY
(cmd_out, _err) = util.subp(cmd)
LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+ except ProcessExecutionError as e:
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
return False
- except OSError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+ except OSError as e:
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
return False
floppy_dev = '/dev/fd0'
# udevadm settle for floppy device
try:
- cmd = CMD_UDEVADM_SETTLE
- cmd.append('--exit-if-exists=' + floppy_dev)
- (cmd_out, _err) = util.subp(cmd)
+ (cmd_out, _err) = util.udevadm_settle(exists=floppy_dev, timeout=5)
LOG.debug('Command: %s\nOutput%s', ' '.join(cmd), cmd_out)
- except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+ except ProcessExecutionError as e:
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
return False
- except OSError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
+ except OSError as e:
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), e)
return False
try:
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 0ee622e2..7007d9ea 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -48,6 +48,7 @@ DEFAULT_FS = 'ext4'
# DMI chassis-asset-tag is set static for all azure instances
AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
+REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata"
@@ -107,31 +108,24 @@ def find_dev_from_busdev(camcontrol_out, busdev):
return None
-def get_dev_storvsc_sysctl():
+def execute_or_debug(cmd, fail_ret=None):
try:
- sysctl_out, err = util.subp(['sysctl', 'dev.storvsc'])
+ return util.subp(cmd)[0]
except util.ProcessExecutionError:
- LOG.debug("Fail to execute sysctl dev.storvsc")
- sysctl_out = ""
- return sysctl_out
+ LOG.debug("Failed to execute: %s", ' '.join(cmd))
+ return fail_ret
+
+
+def get_dev_storvsc_sysctl():
+ return execute_or_debug(["sysctl", "dev.storvsc"], fail_ret="")
def get_camcontrol_dev_bus():
- try:
- camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b'])
- except util.ProcessExecutionError:
- LOG.debug("Fail to execute camcontrol devlist -b")
- return None
- return camcontrol_b_out
+ return execute_or_debug(['camcontrol', 'devlist', '-b'])
def get_camcontrol_dev():
- try:
- camcontrol_out, err = util.subp(['camcontrol', 'devlist'])
- except util.ProcessExecutionError:
- LOG.debug("Fail to execute camcontrol devlist")
- return None
- return camcontrol_out
+ return execute_or_debug(['camcontrol', 'devlist'])
def get_resource_disk_on_freebsd(port_id):
@@ -214,6 +208,7 @@ BUILTIN_CLOUD_CONFIG = {
}
DS_CFG_PATH = ['datasource', DS_NAME]
+DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
DEF_EPHEMERAL_LABEL = 'Temporary Storage'
# The redacted password fails to meet password complexity requirements
@@ -400,14 +395,9 @@ class DataSourceAzure(sources.DataSource):
if found == ddir:
LOG.debug("using files cached in %s", ddir)
- # azure / hyper-v provides random data here
- # TODO. find the seed on FreeBSD platform
- # now update ds_cfg to reflect contents pass in config
- if not util.is_FreeBSD():
- seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
- quiet=True, decode=False)
- if seed:
- self.metadata['random_seed'] = seed
+ seed = _get_random_seed()
+ if seed:
+ self.metadata['random_seed'] = seed
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -443,11 +433,12 @@ class DataSourceAzure(sources.DataSource):
LOG.debug("negotiating already done for %s",
self.get_instance_id())
- def _poll_imds(self, report_ready=True):
+ def _poll_imds(self):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
url = IMDS_URL + "?api-version=2017-04-02"
headers = {"Metadata": "true"}
+ report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
LOG.debug("Start polling IMDS")
def exc_cb(msg, exception):
@@ -457,13 +448,17 @@ class DataSourceAzure(sources.DataSource):
# call DHCP and setup the ephemeral network to acquire the new IP.
return False
- need_report = report_ready
while True:
try:
with EphemeralDHCPv4() as lease:
- if need_report:
+ if report_ready:
+ path = REPORTED_READY_MARKER_FILE
+ LOG.info(
+ "Creating a marker file to report ready: %s", path)
+ util.write_file(path, "{pid}: {time}\n".format(
+ pid=os.getpid(), time=time()))
self._report_ready(lease=lease)
- need_report = False
+ report_ready = False
return readurl(url, timeout=1, headers=headers,
exception_cb=exc_cb, infinite=True).contents
except UrlError:
@@ -474,7 +469,7 @@ class DataSourceAzure(sources.DataSource):
before we go into our polling loop."""
try:
get_metadata_from_fabric(None, lease['unknown-245'])
- except Exception as exc:
+ except Exception:
LOG.warning(
"Error communicating with Azure fabric; You may experience."
"connectivity issues.", exc_info=True)
@@ -492,13 +487,15 @@ class DataSourceAzure(sources.DataSource):
jump back into the polling loop in order to retrieve the ovf_env."""
if not ret:
return False
- (md, self.userdata_raw, cfg, files) = ret
+ (_md, self.userdata_raw, cfg, _files) = ret
path = REPROVISION_MARKER_FILE
if (cfg.get('PreprovisionedVm') is True or
os.path.isfile(path)):
if not os.path.isfile(path):
- LOG.info("Creating a marker file to poll imds")
- util.write_file(path, "%s: %s\n" % (os.getpid(), time()))
+ LOG.info("Creating a marker file to poll imds: %s",
+ path)
+ util.write_file(path, "{pid}: {time}\n".format(
+ pid=os.getpid(), time=time()))
return True
return False
@@ -528,16 +525,19 @@ class DataSourceAzure(sources.DataSource):
self.ds_cfg['agent_command'])
try:
fabric_data = metadata_func()
- except Exception as exc:
+ except Exception:
LOG.warning(
"Error communicating with Azure fabric; You may experience."
"connectivity issues.", exc_info=True)
return False
+ util.del_file(REPORTED_READY_MARKER_FILE)
util.del_file(REPROVISION_MARKER_FILE)
return fabric_data
def activate(self, cfg, is_new_instance):
- address_ephemeral_resize(is_new_instance=is_new_instance)
+ address_ephemeral_resize(is_new_instance=is_new_instance,
+ preserve_ntfs=self.ds_cfg.get(
+ DS_CFG_KEY_PRESERVE_NTFS, False))
return
@property
@@ -581,17 +581,29 @@ def _has_ntfs_filesystem(devpath):
return os.path.realpath(devpath) in ntfs_devices
-def can_dev_be_reformatted(devpath):
- """Determine if block device devpath is newly formatted ephemeral.
+def can_dev_be_reformatted(devpath, preserve_ntfs):
+ """Determine if the ephemeral drive at devpath should be reformatted.
- A newly formatted disk will:
+ A fresh ephemeral disk is formatted by Azure and will:
a.) have a partition table (dos or gpt)
b.) have 1 partition that is ntfs formatted, or
have 2 partitions with the second partition ntfs formatted.
(larger instances with >2TB ephemeral disk have gpt, and will
have a microsoft reserved partition as part 1. LP: #1686514)
c.) the ntfs partition will have no files other than possibly
- 'dataloss_warning_readme.txt'"""
+ 'dataloss_warning_readme.txt'
+
+    The user can indicate that NTFS should never be destroyed by setting
+    DS_CFG_KEY_PRESERVE_NTFS in dscfg.
+    If data is found on NTFS, the user is warned to set
+    DS_CFG_KEY_PRESERVE_NTFS to make sure cloud-init does not accidentally
+    wipe their data.
+    If cloud-init cannot mount the disk to check for data, destruction
+    will be allowed unless the dscfg key is set."""
+ if preserve_ntfs:
+ msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
+ (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
+ return False, msg
+
if not os.path.exists(devpath):
return False, 'device %s does not exist' % devpath
@@ -624,18 +636,27 @@ def can_dev_be_reformatted(devpath):
bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
(cand_part, cand_path, devpath))
try:
- file_count = util.mount_cb(cand_path, count_files)
+ file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
+ update_env_for_mount={'LANG': 'C'})
except util.MountFailedError as e:
+ if "mount: unknown filesystem type 'ntfs'" in str(e):
+ return True, (bmsg + ' but this system cannot mount NTFS,'
+ ' assuming there are no important files.'
+ ' Formatting allowed.')
return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
if file_count != 0:
+ LOG.warning("it looks like you're using NTFS on the ephemeral disk, "
+ 'to ensure that filesystem does not get wiped, set '
+ '%s.%s in config', '.'.join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS)
return False, bmsg + ' but had %d files on it.' % file_count
return True, bmsg + ' and had no important files. Safe for reformatting.'
def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
- is_new_instance=False):
+ is_new_instance=False, preserve_ntfs=False):
# wait for ephemeral disk to come up
naplen = .2
missing = util.wait_for_files([devpath], maxwait=maxwait, naplen=naplen,
@@ -651,7 +672,7 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
if is_new_instance:
result, msg = (True, "First instance boot.")
else:
- result, msg = can_dev_be_reformatted(devpath)
+ result, msg = can_dev_be_reformatted(devpath, preserve_ntfs)
LOG.debug("reformattable=%s: %s", result, msg)
if not result:
@@ -965,6 +986,18 @@ def _check_freebsd_cdrom(cdrom_dev):
return False
+def _get_random_seed():
+ """Return content random seed file if available, otherwise,
+ return None."""
+ # azure / hyper-v provides random data here
+ # TODO. find the seed on FreeBSD platform
+ # now update ds_cfg to reflect contents pass in config
+ if util.is_FreeBSD():
+ return None
+ return util.load_file("/sys/firmware/acpi/tables/OEM0",
+ quiet=True, decode=False)
+
+
def list_possible_azure_ds_devs():
devlist = []
if util.is_FreeBSD():
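A minimal sketch of how the new preserve_ntfs switch is exercised, assuming
the key name 'never_destroy_ntfs' for DS_CFG_KEY_PRESERVE_NTFS (its
definition sits outside these hunks):

    ds_cfg = {'never_destroy_ntfs': True}   # assumed key name
    allowed, msg = can_dev_be_reformatted(
        RESOURCE_DISK_PATH,
        preserve_ntfs=ds_cfg.get('never_destroy_ntfs', False))
    assert allowed is False  # msg: "config says to never destroy NTFS ..."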
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 0df545fc..d4b758f2 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -68,6 +68,10 @@ class DataSourceCloudStack(sources.DataSource):
dsname = 'CloudStack'
+ # Setup read_url parameters per get_url_params.
+ url_max_wait = 120
+ url_timeout = 50
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed_dir = os.path.join(paths.seed_dir, 'cs')
@@ -80,33 +84,18 @@ class DataSourceCloudStack(sources.DataSource):
self.metadata_address = "http://%s/" % (self.vr_addr,)
self.cfg = {}
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
+ def wait_for_metadata_service(self):
+ url_params = self.get_url_params()
- if max_wait == 0:
+ if url_params.max_wait_seconds <= 0:
return False
- timeout = 50
- try:
- timeout = int(mcfg.get("timeout", timeout))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
- def wait_for_metadata_service(self):
- (max_wait, timeout) = self._get_url_settings()
-
urls = [uhelp.combine_url(self.metadata_address,
'latest/meta-data/instance-id')]
start_time = time.time()
- url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
+ url = uhelp.wait_for_url(
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warn)
if url:
LOG.debug("Using metadata source: '%s'", url)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index c7b5fe5f..4cb28977 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -43,7 +43,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
self.version = None
self.ec2_metadata = None
self._network_config = None
- self.network_json = None
+ self.network_json = sources.UNSET
self.network_eni = None
self.known_macs = None
self.files = {}
@@ -69,7 +69,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, "Failed reading config drive from %s", sdir)
if not found:
- for dev in find_candidate_devs():
+ dslist = self.sys_cfg.get('datasource_list')
+ for dev in find_candidate_devs(dslist=dslist):
try:
# Set mtype if freebsd and turn off sync
if dev.startswith("/dev/cd"):
@@ -148,7 +149,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
@property
def network_config(self):
if self._network_config is None:
- if self.network_json is not None:
+ if self.network_json not in (None, sources.UNSET):
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
self.network_json, known_macs=self.known_macs)
@@ -211,7 +212,7 @@ def write_injected_files(files):
util.logexc(LOG, "Failed writing file: %s", filename)
-def find_candidate_devs(probe_optical=True):
+def find_candidate_devs(probe_optical=True, dslist=None):
"""Return a list of devices that may contain the config drive.
The returned list is sorted by search order where the first item has
@@ -227,6 +228,9 @@ def find_candidate_devs(probe_optical=True):
* either vfat or iso9660 formated
* labeled with 'config-2' or 'CONFIG-2'
"""
+ if dslist is None:
+ dslist = []
+
# query optical drive to get it in blkid cache for 2.6 kernels
if probe_optical:
for device in OPTICAL_DEVICES:
@@ -257,7 +261,8 @@ def find_candidate_devs(probe_optical=True):
devices = [d for d in candidates
if d in by_label or not util.is_partition(d)]
- if devices:
+ LOG.debug("devices=%s dslist=%s", devices, dslist)
+ if devices and "IBMCloud" in dslist:
# IBMCloud uses config-2 label, but limited to a single UUID.
ibm_platform, ibm_path = get_ibm_platform()
if ibm_path in devices:
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 21e9ef84..968ab3f7 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -27,8 +27,6 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
-_unset = "_unset"
-
class Platforms(object):
# TODO Rename and move to cloudinit.cloud.CloudNames
@@ -59,15 +57,16 @@ class DataSourceEc2(sources.DataSource):
# for extended metadata content. IPv6 support comes in 2016-09-02
extended_metadata_versions = ['2016-09-02']
+ # Setup read_url parameters per get_url_params.
+ url_max_wait = 120
+ url_timeout = 50
+
_cloud_platform = None
- _network_config = _unset # Used for caching calculated network config v1
+ _network_config = sources.UNSET # Used to cache calculated network cfg v1
# Whether we want to get network configuration from the metadata service.
- get_network_metadata = False
-
- # Track the discovered fallback nic for use in configuration generation.
- _fallback_interface = None
+ perform_dhcp_setup = False
def __init__(self, sys_cfg, distro, paths):
super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
@@ -98,7 +97,7 @@ class DataSourceEc2(sources.DataSource):
elif self.cloud_platform == Platforms.NO_EC2_METADATA:
return False
- if self.get_network_metadata: # Setup networking in init-local stage.
+ if self.perform_dhcp_setup: # Setup networking in init-local stage.
if util.is_FreeBSD():
LOG.debug("FreeBSD doesn't support running dhclient with -sf")
return False
@@ -158,27 +157,11 @@ class DataSourceEc2(sources.DataSource):
else:
return self.metadata['instance-id']
- def _get_url_settings(self):
- mcfg = self.ds_cfg
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- timeout = 50
- try:
- timeout = max(0, int(mcfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- return (max_wait, timeout)
-
def wait_for_metadata_service(self):
mcfg = self.ds_cfg
- (max_wait, timeout) = self._get_url_settings()
- if max_wait <= 0:
+ url_params = self.get_url_params()
+ if url_params.max_wait_seconds <= 0:
return False
# Remove addresses from the list that wont resolve.
@@ -205,7 +188,8 @@ class DataSourceEc2(sources.DataSource):
start_time = time.time()
url = uhelp.wait_for_url(
- urls=urls, max_wait=max_wait, timeout=timeout, status_cb=LOG.warn)
+ urls=urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds, status_cb=LOG.warn)
if url:
self.metadata_address = url2base[url]
@@ -310,11 +294,11 @@ class DataSourceEc2(sources.DataSource):
@property
def network_config(self):
"""Return a network config dict for rendering ENI or netplan files."""
- if self._network_config != _unset:
+ if self._network_config != sources.UNSET:
return self._network_config
if self.metadata is None:
- # this would happen if get_data hadn't been called. leave as _unset
+ # this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
"Unexpected call to network_config when metadata is None.")
return None
@@ -353,9 +337,7 @@ class DataSourceEc2(sources.DataSource):
self._fallback_interface = _legacy_fbnic
self.fallback_nic = None
else:
- self._fallback_interface = net.find_fallback_nic()
- if self._fallback_interface is None:
- LOG.warning("Did not find a fallback interface on EC2.")
+ return super(DataSourceEc2, self).fallback_interface
return self._fallback_interface
def _crawl_metadata(self):
@@ -390,7 +372,7 @@ class DataSourceEc2Local(DataSourceEc2):
metadata service. If the metadata service provides network configuration
then render the network configuration for that instance based on metadata.
"""
- get_network_metadata = True # Get metadata network config if present
+ perform_dhcp_setup = True # Use dhcp before querying metadata
def get_data(self):
supported_platforms = (Platforms.AWS,)
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 02b3d56f..01106ec0 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -8,17 +8,11 @@ There are 2 different api exposed launch methods.
* template: This is the legacy method of launching instances.
When booting from an image template, the system boots first into
a "provisioning" mode. There, host <-> guest mechanisms are utilized
- to execute code in the guest and provision it.
+ to execute code in the guest and configure it. The configuration
+     includes configuring the system network and possibly installing
+     packages and other software.
- Cloud-init will disable itself when it detects that it is in the
- provisioning mode. It detects this by the presence of
- a file '/root/provisioningConfiguration.cfg'.
-
- When provided with user-data, the "first boot" will contain a
- ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data
- provided, then there is no data-source.
-
- Cloud-init never does any network configuration in this mode.
+ After the provisioning is finished, the system reboots.
* os_code: Essentially "launch by OS Code" (Operating System Code).
This is a more modern approach. There is no specific "provisioning" boot.
@@ -30,11 +24,73 @@ There are 2 different api exposed launch methods.
   mean that 1 in 16^8 (~4 billion) Xen ConfigDrive systems will be
incorrectly identified as IBMCloud.
+The combination of these 2 launch methods, with or without user-data,
+creates 6 boot scenarios.
+ A. os_code with user-data
+ B. os_code without user-data
+ Cloud-init is fully operational in this mode.
+
+ There is a block device attached with label 'config-2'.
+ As it differs from OpenStack's config-2, we have to differentiate.
+ We do so by requiring the UUID on the filesystem to be "9796-932E".
+
+     This disk will have the following files. Note specifically that there
+     is no versioned path to the meta-data, only 'latest':
+ openstack/latest/meta_data.json
+ openstack/latest/network_data.json
+ openstack/latest/user_data [optional]
+ openstack/latest/vendor_data.json
+
+ vendor_data.json as of 2018-04 looks like this:
+ {"cloud-init":"#!/bin/bash\necho 'root:$6$<snip>' | chpasswd -e"}
+
+ The only difference between A and B in this mode is the presence
+ of user_data on the config disk.
+
+ C. template, provisioning boot with user-data
+ D. template, provisioning boot without user-data.
+     With ds-identify, cloud-init is fully disabled in this mode.
+     Without ds-identify, the cloud-init None datasource will be used.
+
+ This is currently identified by the presence of
+ /root/provisioningConfiguration.cfg . That file is placed into the
+ system before it is booted.
+
+ The difference between C and D is the presence of the METADATA disk
+ as described in E below. There is no METADATA disk attached unless
+ user-data is provided.
+
+ E. template, post-provisioning boot with user-data.
+ Cloud-init is fully operational in this mode.
+
+ This is identified by a block device with filesystem label "METADATA".
+     The disk looks similar to a version-1 OpenStack config drive. It will
+ have the following files:
+
+ openstack/latest/user_data
+ openstack/latest/meta_data.json
+ openstack/content/interfaces
+ meta.js
+
+ meta.js contains something similar to user_data. cloud-init ignores it.
+ cloud-init ignores the 'interfaces' style file here.
+ In this mode, cloud-init has networking code disabled. It relies
+ on the provisioning boot to have configured networking.
+
+ F. template, post-provisioning boot without user-data.
+ With ds-identify, cloud-init will be fully disabled.
+     Without ds-identify, the cloud-init None datasource will be used.
+
+ There is no information available to identify this scenario.
+
+     The user will be able to ssh in as root with the public keys that
+     were installed into /root/.ssh/authorized_keys
+ during the provisioning stage.
+
TODO:
* is uuid (/sys/hypervisor/uuid) stable for life of an instance?
it seems it is not the same as data's uuid in the os_code case
but is in the template case.
-
"""
import base64
import json
@@ -138,8 +194,30 @@ def _is_xen():
return os.path.exists("/proc/xen")
-def _is_ibm_provisioning():
- return os.path.exists("/root/provisioningConfiguration.cfg")
+def _is_ibm_provisioning(
+ prov_cfg="/root/provisioningConfiguration.cfg",
+ inst_log="/root/swinstall.log",
+ boot_ref="/proc/1/environ"):
+ """Return boolean indicating if this boot is ibm provisioning boot."""
+ if os.path.exists(prov_cfg):
+ msg = "config '%s' exists." % prov_cfg
+ result = True
+ if os.path.exists(inst_log):
+ if os.path.exists(boot_ref):
+ result = (os.stat(inst_log).st_mtime >
+ os.stat(boot_ref).st_mtime)
+ msg += (" log '%s' from %s boot." %
+ (inst_log, "current" if result else "previous"))
+ else:
+ msg += (" log '%s' existed, but no reference file '%s'." %
+ (inst_log, boot_ref))
+ result = False
+ else:
+ msg += " log '%s' did not exist." % inst_log
+ else:
+ result, msg = (False, "config '%s' did not exist." % prov_cfg)
+ LOG.debug("ibm_provisioning=%s: %s", result, msg)
+ return result
def get_ibm_platform():
@@ -189,7 +267,7 @@ def get_ibm_platform():
else:
return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path)
elif _is_ibm_provisioning():
- return (Platforms.TEMPLATE_PROVISIONING_NODATA, None)
+ return (Platforms.TEMPLATE_PROVISIONING_NODATA, None)
return not_found
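The heart of the new _is_ibm_provisioning() check is an mtime comparison:
the install log only counts as evidence of a provisioning boot if it was
written after PID 1 started. That test in isolation:

    import os

    def modified_this_boot(path, boot_ref='/proc/1/environ'):
        # /proc/1/environ's mtime approximates when this boot began, so
        # anything newer than it was modified during the current boot.
        return os.stat(path).st_mtime > os.stat(boot_ref).st_mtime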
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 6ac88635..bcb38544 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -198,13 +198,13 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
If version is None, then <version>/ will not be used.
"""
if read_file_or_url is None:
- read_file_or_url = util.read_file_or_url
+ read_file_or_url = url_helper.read_file_or_url
if seed_url.endswith("/"):
seed_url = seed_url[:-1]
md = {}
- for path, dictname, binary, optional in DS_FIELDS:
+ for path, _dictname, binary, optional in DS_FIELDS:
if version is None:
url = "%s/%s" % (seed_url, path)
else:
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 5d3a8ddb..2daea59d 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -78,7 +78,7 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Using seeded data from %s", path)
mydata = _merge_new_seed(mydata, seeded)
break
- except ValueError as e:
+ except ValueError:
pass
# If the datasource config had a 'seedfrom' entry, then that takes
@@ -117,7 +117,7 @@ class DataSourceNoCloud(sources.DataSource):
try:
seeded = util.mount_cb(dev, _pp2d_callback,
pp2d_kwargs)
- except ValueError as e:
+ except ValueError:
if dev in label_list:
LOG.warning("device %s with label=%s not a"
"valid seed.", dev, label)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index dc914a72..178ccb0f 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -556,7 +556,7 @@ def search_file(dirpath, filename):
if not dirpath or not filename:
return None
- for root, dirs, files in os.walk(dirpath):
+ for root, _dirs, files in os.walk(dirpath):
if filename in files:
return os.path.join(root, filename)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index d4a41116..16c10785 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -378,7 +378,7 @@ def read_context_disk_dir(source_dir, asuser=None):
if asuser is not None:
try:
pwd.getpwnam(asuser)
- except KeyError as e:
+ except KeyError:
raise BrokenContextDiskDir(
"configured user '{user}' does not exist".format(
user=asuser))
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index e55a7638..365af96a 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -7,6 +7,7 @@
import time
from cloudinit import log as logging
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
from cloudinit import sources
from cloudinit import url_helper
from cloudinit import util
@@ -22,51 +23,37 @@ DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
+# OpenStack DMI constants
+DMI_PRODUCT_NOVA = 'OpenStack Nova'
+DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
+VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
+DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
+VALID_DMI_ASSET_TAGS = [DMI_ASSET_TAG_OPENTELEKOM]
+
class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
dsname = "OpenStack"
+ _network_config = sources.UNSET # Used to cache calculated network cfg v1
+
+    # Whether to set up ephemeral dhcp networking before querying metadata.
+ perform_dhcp_setup = False
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
self.ssl_details = util.fetch_ssl_details(self.paths)
self.version = None
self.files = {}
- self.ec2_metadata = None
+ self.ec2_metadata = sources.UNSET
+ self.network_json = sources.UNSET
def __str__(self):
root = sources.DataSource.__str__(self)
mstr = "%s [%s,ver=%s]" % (root, self.dsmode, self.version)
return mstr
- def _get_url_settings(self):
- # TODO(harlowja): this is shared with ec2 datasource, we should just
- # move it to a shared location instead...
- # Note: the defaults here are different though.
-
- # max_wait < 0 indicates do not wait
- max_wait = -1
- timeout = 10
- retries = 5
-
- try:
- max_wait = int(self.ds_cfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
-
- try:
- timeout = max(0, int(self.ds_cfg.get("timeout", timeout)))
- except Exception:
- util.logexc(LOG, "Failed to get timeout, using %s", timeout)
-
- try:
- retries = int(self.ds_cfg.get("retries", retries))
- except Exception:
- util.logexc(LOG, "Failed to get retries. using %s", retries)
-
- return (max_wait, timeout, retries)
-
def wait_for_metadata_service(self):
urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
filtered = [x for x in urls if util.is_resolvable_url(x)]
@@ -86,10 +73,11 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
md_urls.append(md_url)
url2base[md_url] = url
- (max_wait, timeout, retries) = self._get_url_settings()
+ url_params = self.get_url_params()
start_time = time.time()
- avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait,
- timeout=timeout)
+ avail_url = url_helper.wait_for_url(
+ urls=md_urls, max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds)
if avail_url:
LOG.debug("Using metadata source: '%s'", url2base[avail_url])
else:
@@ -99,38 +87,66 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
- def _get_data(self):
- try:
- if not self.wait_for_metadata_service():
- return False
- except IOError:
- return False
+ def check_instance_id(self, sys_cfg):
+        # quickly check (local only) whether self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
- (max_wait, timeout, retries) = self._get_url_settings()
+ @property
+ def network_config(self):
+ """Return a network config dict for rendering ENI or netplan files."""
+ if self._network_config != sources.UNSET:
+ return self._network_config
+
+        # RELEASE_BLOCKER: SRUs to Xenial and Artful should not provide
+ # network_config by default unless configured in /etc/cloud/cloud.cfg*.
+ # Patch Xenial and Artful before release to default to False.
+ if util.is_false(self.ds_cfg.get('apply_network_config', True)):
+ self._network_config = None
+ return self._network_config
+ if self.network_json == sources.UNSET:
+ # this would happen if get_data hadn't been called. leave as UNSET
+ LOG.warning(
+                'Unexpected call to network_config when network_json is unset.')
+ return None
+
+ LOG.debug('network config provided via network_json')
+ self._network_config = openstack.convert_net_json(
+ self.network_json, known_macs=None)
+ return self._network_config
- try:
- results = util.log_time(LOG.debug,
- 'Crawl of openstack metadata service',
- read_metadata_service,
- args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details,
- 'retries': retries,
- 'timeout': timeout})
- except openstack.NonReadable:
- return False
- except (openstack.BrokenMetadata, IOError):
- util.logexc(LOG, "Broken metadata address %s",
- self.metadata_address)
+ def _get_data(self):
+ """Crawl metadata, parse and persist that data for this instance.
+
+ @return: True when metadata discovered indicates OpenStack datasource.
+ False when unable to contact metadata service or when metadata
+ format is invalid or disabled.
+ """
+ if not detect_openstack():
return False
+ if self.perform_dhcp_setup: # Setup networking in init-local stage.
+ try:
+ with EphemeralDHCPv4(self.fallback_interface):
+ results = util.log_time(
+ logfunc=LOG.debug, msg='Crawl of metadata service',
+ func=self._crawl_metadata)
+ except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
+ util.logexc(LOG, str(e))
+ return False
+ else:
+ try:
+ results = self._crawl_metadata()
+ except sources.InvalidMetaDataException as e:
+ util.logexc(LOG, str(e))
+ return False
self.dsmode = self._determine_dsmode([results.get('dsmode')])
if self.dsmode == sources.DSMODE_DISABLED:
return False
-
md = results.get('metadata', {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.metadata = md
self.ec2_metadata = results.get('ec2-metadata')
+ self.network_json = results.get('networkdata')
self.userdata_raw = results.get('userdata')
self.version = results['version']
self.files.update(results.get('files', {}))
@@ -145,9 +161,50 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
return True
- def check_instance_id(self, sys_cfg):
- # quickly (local check only) if self.instance_id is still valid
- return sources.instance_id_matches_system_uuid(self.get_instance_id())
+ def _crawl_metadata(self):
+ """Crawl metadata service when available.
+
+ @returns: Dictionary with all metadata discovered for this datasource.
+ @raise: InvalidMetaDataException on unreadable or broken
+ metadata.
+ """
+ try:
+ if not self.wait_for_metadata_service():
+ raise sources.InvalidMetaDataException(
+ 'No active metadata service found')
+ except IOError as e:
+ raise sources.InvalidMetaDataException(
+ 'IOError contacting metadata service: {error}'.format(
+ error=str(e)))
+
+ url_params = self.get_url_params()
+
+ try:
+ result = util.log_time(
+ LOG.debug, 'Crawl of openstack metadata service',
+ read_metadata_service, args=[self.metadata_address],
+ kwargs={'ssl_details': self.ssl_details,
+ 'retries': url_params.num_retries,
+ 'timeout': url_params.timeout_seconds})
+ except openstack.NonReadable as e:
+ raise sources.InvalidMetaDataException(str(e))
+ except (openstack.BrokenMetadata, IOError):
+ msg = 'Broken metadata address {addr}'.format(
+ addr=self.metadata_address)
+ raise sources.InvalidMetaDataException(msg)
+ return result
+
+
+class DataSourceOpenStackLocal(DataSourceOpenStack):
+ """Run in init-local using a dhcp discovery prior to metadata crawl.
+
+ In init-local, no network is available. This subclass sets up minimal
+ networking with dhclient on a viable nic so that it can talk to the
+ metadata service. If the metadata service provides network configuration
+ then render the network configuration for that instance based on metadata.
+ """
+
+ perform_dhcp_setup = True # Get metadata network config if present
def read_metadata_service(base_url, ssl_details=None,
@@ -157,8 +214,23 @@ def read_metadata_service(base_url, ssl_details=None,
return reader.read_v2()
+def detect_openstack():
+ """Return True when a potential OpenStack platform is detected."""
+ if not util.is_x86():
+        return True  # Non-Intel CPUs don't properly report DMI product names
+ product_name = util.read_dmi_data('system-product-name')
+ if product_name in VALID_DMI_PRODUCT_NAMES:
+ return True
+ elif util.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
+ return True
+ elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
+ return True
+ return False
+
+
# Used to match classes to dependencies
datasources = [
+ (DataSourceOpenStackLocal, (sources.DEP_FILESYSTEM,)),
(DataSourceOpenStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
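A condensed sketch of the init-local flow DataSourceOpenStackLocal enables,
using the same names as this diff (error handling trimmed):

    from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError

    def crawl_with_ephemeral_net(ds):
        # Hold a temporary DHCP lease on the fallback nic just long enough
        # to crawl the metadata service; teardown happens on context exit.
        try:
            with EphemeralDHCPv4(ds.fallback_interface):
                return ds._crawl_metadata()
        except NoDHCPLeaseError:
            return None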
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 86bfa5d8..f92e8b5c 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -1,4 +1,5 @@
# Copyright (C) 2013 Canonical Ltd.
+# Copyright (c) 2018, Joyent, Inc.
#
# Author: Ben Howard <ben.howard@canonical.com>
#
@@ -10,17 +11,19 @@
# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
# The meta-data is transmitted via key/value pairs made by
# requests on the console. For example, to get the hostname, you
-# would send "GET hostname" on /dev/ttyS1.
+# would send "GET sdc:hostname" on /dev/ttyS1.
# For Linux Guests running in LX-Brand Zones on SmartOS hosts
# a socket (/native/.zonecontrol/metadata.sock) is used instead
# of a serial console.
#
# Certain behavior is defined by the DataDictionary
-# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
+# https://eng.joyent.com/mdata/datadict.html
# Comments with "@datadictionary" are snippets of the definition
import base64
import binascii
+import errno
+import fcntl
import json
import os
import random
@@ -108,7 +111,7 @@ BUILTIN_CLOUD_CONFIG = {
'overwrite': False}
},
'fs_setup': [{'label': 'ephemeral0',
- 'filesystem': 'ext3',
+ 'filesystem': 'ext4',
'device': 'ephemeral0'}],
}
@@ -162,9 +165,8 @@ class DataSourceSmartOS(sources.DataSource):
dsname = "Joyent"
- _unset = "_unset"
- smartos_type = _unset
- md_client = _unset
+ smartos_type = sources.UNSET
+ md_client = sources.UNSET
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -186,12 +188,12 @@ class DataSourceSmartOS(sources.DataSource):
return "%s [client=%s]" % (root, self.md_client)
def _init(self):
- if self.smartos_type == self._unset:
+ if self.smartos_type == sources.UNSET:
self.smartos_type = get_smartos_environ()
if self.smartos_type is None:
self.md_client = None
- if self.md_client == self._unset:
+ if self.md_client == sources.UNSET:
self.md_client = jmc_client_factory(
smartos_type=self.smartos_type,
metadata_sockfile=self.ds_cfg['metadata_sockfile'],
@@ -229,6 +231,9 @@ class DataSourceSmartOS(sources.DataSource):
self.md_client)
return False
+ # Open once for many requests, rather than once for each request
+ self.md_client.open_transport()
+
for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
smartos_noun, strip = attribute
md[ci_noun] = self.md_client.get(smartos_noun, strip=strip)
@@ -236,6 +241,8 @@ class DataSourceSmartOS(sources.DataSource):
for ci_noun, smartos_noun in SMARTOS_ATTRIB_JSON.items():
md[ci_noun] = self.md_client.get_json(smartos_noun)
+ self.md_client.close_transport()
+
# @datadictionary: This key may contain a program that is written
# to a file in the filesystem of the guest on each boot and then
# executed. It may be of any format that would be considered
@@ -266,8 +273,14 @@ class DataSourceSmartOS(sources.DataSource):
write_boot_content(u_data, u_data_f)
# Handle the cloud-init regular meta
+
+ # The hostname may or may not be qualified with the local domain name.
+ # This follows section 3.14 of RFC 2132.
if not md['local-hostname']:
- md['local-hostname'] = md['instance-id']
+ if md['hostname']:
+ md['local-hostname'] = md['hostname']
+ else:
+ md['local-hostname'] = md['instance-id']
ud = None
if md['user-data']:
@@ -285,6 +298,7 @@ class DataSourceSmartOS(sources.DataSource):
self.userdata_raw = ud
self.vendordata_raw = md['vendor-data']
self.network_data = md['network-data']
+ self.routes_data = md['routes']
self._set_provisioned()
return True
@@ -308,7 +322,8 @@ class DataSourceSmartOS(sources.DataSource):
convert_smartos_network_data(
network_data=self.network_data,
dns_servers=self.metadata['dns_servers'],
- dns_domain=self.metadata['dns_domain']))
+ dns_domain=self.metadata['dns_domain'],
+ routes=self.routes_data))
return self._network_config
@@ -316,6 +331,10 @@ class JoyentMetadataFetchException(Exception):
pass
+class JoyentMetadataTimeoutException(JoyentMetadataFetchException):
+ pass
+
+
class JoyentMetadataClient(object):
"""
A client implementing v2 of the Joyent Metadata Protocol Specification.
@@ -360,6 +379,47 @@ class JoyentMetadataClient(object):
LOG.debug('Value "%s" found.', value)
return value
+ def _readline(self):
+ """
+ Reads a line a byte at a time until \n is encountered. Returns an
+ ascii string with the trailing newline removed.
+
+           If a timeout (per-byte) is set and it expires, a
+           JoyentMetadataTimeoutException will be thrown.
+ """
+ response = []
+
+ def as_ascii():
+ return b''.join(response).decode('ascii')
+
+ msg = "Partial response: '%s'"
+ while True:
+ try:
+ byte = self.fp.read(1)
+ if len(byte) == 0:
+ raise JoyentMetadataTimeoutException(msg % as_ascii())
+ if byte == b'\n':
+ return as_ascii()
+ response.append(byte)
+ except OSError as exc:
+ if exc.errno == errno.EAGAIN:
+ raise JoyentMetadataTimeoutException(msg % as_ascii())
+ raise
+
+ def _write(self, msg):
+ self.fp.write(msg.encode('ascii'))
+ self.fp.flush()
+
+ def _negotiate(self):
+ LOG.debug('Negotiating protocol V2')
+ self._write('NEGOTIATE V2\n')
+ response = self._readline()
+ LOG.debug('read "%s"', response)
+ if response != 'V2_OK':
+ raise JoyentMetadataFetchException(
+ 'Invalid response "%s" to "NEGOTIATE V2"' % response)
+ LOG.debug('Negotiation complete')
+
def request(self, rtype, param=None):
request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
message_body = ' '.join((request_id, rtype,))
@@ -374,18 +434,11 @@ class JoyentMetadataClient(object):
self.open_transport()
need_close = True
- self.fp.write(msg.encode('ascii'))
- self.fp.flush()
-
- response = bytearray()
- response.extend(self.fp.read(1))
- while response[-1:] != b'\n':
- response.extend(self.fp.read(1))
-
+ self._write(msg)
+ response = self._readline()
if need_close:
self.close_transport()
- response = response.rstrip().decode('ascii')
LOG.debug('Read "%s" from metadata transport.', response)
if 'SUCCESS' not in response:
@@ -410,9 +463,9 @@ class JoyentMetadataClient(object):
def list(self):
result = self.request(rtype='KEYS')
- if result:
- result = result.split('\n')
- return result
+ if not result:
+ return []
+ return result.split('\n')
def put(self, key, val):
param = b' '.join([base64.b64encode(i.encode())
@@ -450,6 +503,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socketpath)
self.fp = sock.makefile('rwb')
+ self._negotiate()
def exists(self):
return os.path.exists(self.socketpath)
@@ -459,8 +513,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM):
- super(JoyentMetadataSerialClient, self).__init__(smartos_type)
+ def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM,
+ fp=None):
+ super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp)
self.device = device
self.timeout = timeout
@@ -468,10 +523,51 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
return os.path.exists(self.device)
def open_transport(self):
- ser = serial.Serial(self.device, timeout=self.timeout)
- if not ser.isOpen():
- raise SystemError("Unable to open %s" % self.device)
- self.fp = ser
+ if self.fp is None:
+ ser = serial.Serial(self.device, timeout=self.timeout)
+ if not ser.isOpen():
+ raise SystemError("Unable to open %s" % self.device)
+ self.fp = ser
+ fcntl.lockf(ser, fcntl.LOCK_EX)
+ self._flush()
+ self._negotiate()
+
+ def _flush(self):
+ LOG.debug('Flushing input')
+ # Read any pending data
+ timeout = self.fp.timeout
+ self.fp.timeout = 0.1
+ while True:
+ try:
+ self._readline()
+ except JoyentMetadataTimeoutException:
+ break
+ LOG.debug('Input empty')
+
+ # Send a newline and expect "invalid command". Keep trying until
+        # successful.  Retry rather frequently so that the "Is the host
+        # metadata service running?" warning appears on the console soon
+        # after someone attaches in an effort to debug.
+ if timeout > 5:
+ self.fp.timeout = 5
+ else:
+ self.fp.timeout = timeout
+ while True:
+ LOG.debug('Writing newline, expecting "invalid command"')
+ self._write('\n')
+ try:
+ response = self._readline()
+ if response == 'invalid command':
+ break
+ if response == 'FAILURE':
+ LOG.debug('Got "FAILURE". Retrying.')
+ continue
+ LOG.warning('Unexpected response "%s" during flush', response)
+ except JoyentMetadataTimeoutException:
+ LOG.warning('Timeout while initializing metadata client. ' +
+ 'Is the host metadata service running?')
+ LOG.debug('Got "invalid command". Flush complete.')
+ self.fp.timeout = timeout
def __repr__(self):
return "%s(device=%s, timeout=%s)" % (
@@ -650,7 +746,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
# report 'BrandZ virtual linux' as the kernel version
if uname_version is None:
uname_version = uname[3]
- if uname_version.lower() == 'brandz virtual linux':
+ if uname_version == 'BrandZ virtual linux':
return SMARTOS_ENV_LX_BRAND
if product_name is None:
@@ -658,7 +754,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
else:
system_type = product_name
- if system_type and 'smartdc' in system_type.lower():
+ if system_type and system_type.startswith('SmartDC'):
return SMARTOS_ENV_KVM
return None
@@ -666,7 +762,8 @@ def get_smartos_environ(uname_version=None, product_name=None):
# Convert SMARTOS 'sdc:nics' data to network_config yaml
def convert_smartos_network_data(network_data=None,
- dns_servers=None, dns_domain=None):
+ dns_servers=None, dns_domain=None,
+ routes=None):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
@@ -684,6 +781,10 @@ def convert_smartos_network_data(network_data=None,
keys are related to ip configuration. For each ip in the 'ips' list
we create a subnet entry under 'subnets' pairing the ip to a one in
the 'gateways' list.
+
+ Each route in sdc:routes is mapped to a route on each interface.
+ The sdc:routes properties 'dst' and 'gateway' map to 'network' and
+ 'gateway'. The 'linklocal' sdc:routes property is ignored.
"""
valid_keys = {
@@ -706,6 +807,10 @@ def convert_smartos_network_data(network_data=None,
'scope',
'type',
],
+ 'route': [
+ 'network',
+ 'gateway',
+ ],
}
if dns_servers:
@@ -720,6 +825,9 @@ def convert_smartos_network_data(network_data=None,
else:
dns_domain = []
+ if not routes:
+ routes = []
+
def is_valid_ipv4(addr):
return '.' in addr
@@ -746,6 +854,7 @@ def convert_smartos_network_data(network_data=None,
if ip == "dhcp":
subnet = {'type': 'dhcp4'}
else:
+ routeents = []
subnet = dict((k, v) for k, v in nic.items()
if k in valid_keys['subnet'])
subnet.update({
@@ -767,6 +876,25 @@ def convert_smartos_network_data(network_data=None,
pgws[proto]['gw'] = gateways[0]
subnet.update({'gateway': pgws[proto]['gw']})
+ for route in routes:
+ rcfg = dict((k, v) for k, v in route.items()
+ if k in valid_keys['route'])
+ # Linux uses the value of 'gateway' to determine
+ # automatically if the route is a forward/next-hop
+ # (non-local IP for gateway) or an interface/resolver
+ # (local IP for gateway). So we can ignore the
+ # 'interface' attribute of sdc:routes, because SDC
+ # guarantees that the gateway is a local IP for
+ # "interface=true".
+ #
+ # Eventually we should be smart and compare "gateway"
+ # to see if it's in the prefix. We can then smartly
+ # add or not-add this route. But for now,
+ # when in doubt, use brute force! Routes for everyone!
+ rcfg.update({'network': route['dst']})
+ routeents.append(rcfg)
+ subnet.update({'routes': routeents})
+
subnets.append(subnet)
cfg.update({'subnets': subnets})
config.append(cfg)
@@ -810,12 +938,14 @@ if __name__ == "__main__":
keyname = SMARTOS_ATTRIB_JSON[key]
data[key] = client.get_json(keyname)
elif key == "network_config":
- for depkey in ('network-data', 'dns_servers', 'dns_domain'):
+ for depkey in ('network-data', 'dns_servers', 'dns_domain',
+ 'routes'):
load_key(client, depkey, data)
data[key] = convert_smartos_network_data(
network_data=data['network-data'],
dns_servers=data['dns_servers'],
- dns_domain=data['dns_domain'])
+ dns_domain=data['dns_domain'],
+ routes=data['routes'])
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
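To make the sdc:routes mapping concrete, a small worked example with
made-up values:

    # One route from sdc:routes; 'linklocal' is ignored by the converter.
    route = {'dst': '10.0.0.0/8', 'gateway': '192.168.1.1',
             'linklocal': False}
    # After the valid_keys['route'] filter and the 'dst' -> 'network'
    # rename, every static subnet gains:
    #   {'routes': [{'network': '10.0.0.0/8', 'gateway': '192.168.1.1'}]}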
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index df0b374a..90d74575 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -9,6 +9,7 @@
# This file is part of cloud-init. See LICENSE file for license information.
import abc
+from collections import namedtuple
import copy
import json
import os
@@ -17,6 +18,7 @@ import six
from cloudinit.atomic_helper import write_json
from cloudinit import importer
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import type_utils
from cloudinit import user_data as ud
from cloudinit import util
@@ -41,6 +43,8 @@ INSTANCE_JSON_FILE = 'instance-data.json'
# Key which can be provide a cloud's official product name to cloud-init
METADATA_CLOUD_NAME_KEY = 'cloud-name'
+UNSET = "_unset"
+
LOG = logging.getLogger(__name__)
@@ -48,6 +52,11 @@ class DataSourceNotFoundException(Exception):
pass
+class InvalidMetaDataException(Exception):
+ """Raised when metadata is broken, unavailable or disabled."""
+ pass
+
+
def process_base64_metadata(metadata, key_path=''):
"""Strip ci-b64 prefix and return metadata with base64-encoded-keys set."""
md_copy = copy.deepcopy(metadata)
@@ -68,6 +77,10 @@ def process_base64_metadata(metadata, key_path=''):
return md_copy
+URLParams = namedtuple(
+    'URLParams', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
+
+
@six.add_metaclass(abc.ABCMeta)
class DataSource(object):
@@ -81,6 +94,14 @@ class DataSource(object):
# Cached cloud_name as determined by _get_cloud_name
_cloud_name = None
+ # Track the discovered fallback nic for use in configuration generation.
+ _fallback_interface = None
+
+ # read_url_params
+ url_max_wait = -1 # max_wait < 0 means do not wait
+ url_timeout = 10 # timeout for each metadata url read attempt
+ url_retries = 5 # number of times to retry url upon 404
+
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
@@ -128,6 +149,14 @@ class DataSource(object):
'meta-data': self.metadata,
'user-data': self.get_userdata_raw(),
'vendor-data': self.get_vendordata_raw()}}
+ if hasattr(self, 'network_json'):
+ network_json = getattr(self, 'network_json')
+ if network_json != UNSET:
+ instance_data['ds']['network_json'] = network_json
+ if hasattr(self, 'ec2_metadata'):
+ ec2_metadata = getattr(self, 'ec2_metadata')
+ if ec2_metadata != UNSET:
+ instance_data['ds']['ec2_metadata'] = ec2_metadata
instance_data.update(
self._get_standardized_metadata())
try:
@@ -149,6 +178,42 @@ class DataSource(object):
'Subclasses of DataSource must implement _get_data which'
' sets self.metadata, vendordata_raw and userdata_raw.')
+ def get_url_params(self):
+ """Return the Datasource's prefered url_read parameters.
+
+ Subclasses may override url_max_wait, url_timeout, url_retries.
+
+ @return: A URLParams object with max_wait_seconds, timeout_seconds,
+ num_retries.
+ """
+ max_wait = self.url_max_wait
+ try:
+ max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
+ except ValueError:
+ util.logexc(
+ LOG, "Config max_wait '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("max_wait"), max_wait)
+
+ timeout = self.url_timeout
+ try:
+ timeout = max(
+ 0, int(self.ds_cfg.get("timeout", self.url_timeout)))
+ except ValueError:
+ timeout = self.url_timeout
+ util.logexc(
+ LOG, "Config timeout '%s' is not an int, using default '%s'",
+ self.ds_cfg.get('timeout'), timeout)
+
+ retries = self.url_retries
+ try:
+ retries = int(self.ds_cfg.get("retries", self.url_retries))
+ except Exception:
+ util.logexc(
+ LOG, "Config retries '%s' is not an int, using default '%s'",
+ self.ds_cfg.get('retries'), retries)
+
+ return URLParams(max_wait, timeout, retries)
+
def get_userdata(self, apply_filter=False):
if self.userdata is None:
self.userdata = self.ud_proc.process(self.get_userdata_raw())
@@ -162,6 +227,17 @@ class DataSource(object):
return self.vendordata
@property
+ def fallback_interface(self):
+ """Determine the network interface used during local network config."""
+ if self._fallback_interface is None:
+ self._fallback_interface = net.find_fallback_nic()
+ if self._fallback_interface is None:
+ LOG.warning(
+ "Did not find a fallback interface on %s.",
+ self.cloud_name)
+ return self._fallback_interface
+
+ @property
def cloud_name(self):
"""Return lowercase cloud name as determined by the datasource.
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 90c12df1..e5696b1f 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -14,6 +14,7 @@ from cloudinit import temp_utils
from contextlib import contextmanager
from xml.etree import ElementTree
+from cloudinit import url_helper
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -55,14 +56,14 @@ class AzureEndpointHttpClient(object):
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
- return util.read_file_or_url(url, headers=headers)
+ return url_helper.read_file_or_url(url, headers=headers)
def post(self, url, data=None, extra_headers=None):
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return util.read_file_or_url(url, data=data, headers=headers)
+ return url_helper.read_file_or_url(url, data=data, headers=headers)
class GoalState(object):
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index 693f8d5c..0e7cccac 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -41,10 +41,9 @@ def assign_ipv4_link_local(nic=None):
"address")
try:
- (result, _err) = util.subp(ip_addr_cmd)
+ util.subp(ip_addr_cmd)
LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
-
- (result, _err) = util.subp(ip_link_cmd)
+ util.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
@@ -75,7 +74,7 @@ def del_ipv4_link_local(nic=None):
ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
try:
- (result, _err) = util.subp(ip_addr_cmd)
+ util.subp(ip_addr_cmd)
LOG.debug("removed ip4LL addresses from %s", nic)
except Exception as e:
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 26f3168d..a4cf0667 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -638,7 +638,7 @@ def convert_net_json(network_json=None, known_macs=None):
known_macs = net.get_interfaces_by_mac()
# go through and fill out the link_id_info with names
- for link_id, info in link_id_info.items():
+ for _link_id, info in link_id_info.items():
if info.get('name'):
continue
if info.get('mac') in known_macs:
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 2d8900e2..3ef8c624 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -73,7 +73,7 @@ class NicConfigurator(object):
The mac address(es) are in the lower case
"""
cmd = ['ip', 'addr', 'show']
- (output, err) = util.subp(cmd)
+ output, _err = util.subp(cmd)
sections = re.split(r'\n\d+: ', '\n' + output)[1:]
macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index 75cfbaaf..8c91fa41 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -56,10 +56,10 @@ class PasswordConfigurator(object):
LOG.info('Expiring password.')
for user in uidUserList:
try:
- out, err = util.subp(['passwd', '--expire', user])
+ util.subp(['passwd', '--expire', user])
except util.ProcessExecutionError as e:
if os.path.exists('/usr/bin/chage'):
- out, e = util.subp(['chage', '-d', '0', user])
+ util.subp(['chage', '-d', '0', user])
else:
LOG.warning('Failed to expire password for %s with error: '
'%s', user, e)
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index 44075255..a590f323 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -91,7 +91,7 @@ def enable_nics(nics):
for attempt in range(0, enableNicsWaitRetries):
logger.debug("Trying to connect interfaces, attempt %d", attempt)
- (out, err) = set_customization_status(
+ (out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
nics)
@@ -104,7 +104,7 @@ def enable_nics(nics):
return
for count in range(0, enableNicsWaitCount):
- (out, err) = set_customization_status(
+ (out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
nics)
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
index e7fda22a..d5bc98a4 100644
--- a/cloudinit/sources/tests/test_init.py
+++ b/cloudinit/sources/tests/test_init.py
@@ -17,6 +17,7 @@ from cloudinit import util
class DataSourceTestSubclassNet(DataSource):
dsname = 'MyTestSubclass'
+ url_max_wait = 55
def __init__(self, sys_cfg, distro, paths, custom_userdata=None):
super(DataSourceTestSubclassNet, self).__init__(
@@ -70,8 +71,7 @@ class TestDataSource(CiTestCase):
"""Init uses DataSource.dsname for sourcing ds_cfg."""
sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
distro = 'distrotest' # generally should be a Distro object
- paths = Paths({})
- datasource = DataSourceTestSubclassNet(sys_cfg, distro, paths)
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
self.assertEqual({'key2': False}, datasource.ds_cfg)
def test_str_is_classname(self):
@@ -81,6 +81,91 @@ class TestDataSource(CiTestCase):
'DataSourceTestSubclassNet',
str(DataSourceTestSubclassNet('', '', self.paths)))
+ def test_datasource_get_url_params_defaults(self):
+ """get_url_params default url config settings for the datasource."""
+ params = self.datasource.get_url_params()
+ self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
+ self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
+ self.assertEqual(params.num_retries, self.datasource.url_retries)
+
+ def test_datasource_get_url_params_subclassed(self):
+ """Subclasses can override get_url_params defaults."""
+ sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
+ distro = 'distrotest' # generally should be a Distro object
+ datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
+ expected = (datasource.url_max_wait, datasource.url_timeout,
+ datasource.url_retries)
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(self.datasource.get_url_params(), url_params)
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_ds_config_override(self):
+ """Datasource configuration options can override url param defaults."""
+ sys_cfg = {
+ 'datasource': {
+ 'MyTestSubclass': {
+ 'max_wait': '1', 'timeout': '2', 'retries': '3'}}}
+ datasource = DataSourceTestSubclassNet(
+ sys_cfg, self.distro, self.paths)
+ expected = (1, 2, 3)
+ url_params = datasource.get_url_params()
+ self.assertNotEqual(
+ (datasource.url_max_wait, datasource.url_timeout,
+ datasource.url_retries),
+ url_params)
+ self.assertEqual(expected, url_params)
+
+ def test_datasource_get_url_params_is_zero_or_greater(self):
+ """get_url_params ignores timeouts with a value below 0."""
+ # Set an override that is below 0 which gets ignored.
+ sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}}
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ (_max_wait, timeout, _retries) = datasource.get_url_params()
+ self.assertEqual(0, timeout)
+
+ def test_datasource_get_url_uses_defaults_on_errors(self):
+ """On invalid system config values for url_params defaults are used."""
+ # All invalid values should be logged
+ sys_cfg = {'datasource': {
+ '_undef': {
+ 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}}
+ datasource = DataSource(sys_cfg, self.distro, self.paths)
+ url_params = datasource.get_url_params()
+ expected = (datasource.url_max_wait, datasource.url_timeout,
+ datasource.url_retries)
+ self.assertEqual(expected, url_params)
+ logs = self.logs.getvalue()
+ expected_logs = [
+ "Config max_wait 'nope' is not an int, using default '-1'",
+ "Config timeout 'bug' is not an int, using default '10'",
+ "Config retries 'nonint' is not an int, using default '5'",
+ ]
+ for log in expected_logs:
+ self.assertIn(log, logs)
+
+ @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ def test_fallback_interface_is_discovered(self, m_get_fallback_nic):
+ """The fallback_interface is discovered via find_fallback_nic."""
+ m_get_fallback_nic.return_value = 'nic9'
+ self.assertEqual('nic9', self.datasource.fallback_interface)
+
+ @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic):
+ """Log a warning when fallback_interface can not discover the nic."""
+ self.datasource._cloud_name = 'MySupahCloud'
+ m_get_fallback_nic.return_value = None # Couldn't discover nic
+ self.assertIsNone(self.datasource.fallback_interface)
+ self.assertEqual(
+ 'WARNING: Did not find a fallback interface on MySupahCloud.\n',
+ self.logs.getvalue())
+
+ @mock.patch('cloudinit.sources.net.find_fallback_nic')
+ def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic):
+ """The fallback_interface is cached and won't be rediscovered."""
+ self.datasource._fallback_interface = 'nic10'
+ self.assertEqual('nic10', self.datasource.fallback_interface)
+ m_get_fallback_nic.assert_not_called()
+
def test__get_data_unimplemented(self):
"""Raise an error when _get_data is not implemented."""
with self.assertRaises(NotImplementedError) as context_manager:
@@ -278,7 +363,7 @@ class TestDataSource(CiTestCase):
base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505
# Import all DataSource subclasses so we can inspect them.
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
- for loc, name in modules.items():
+ for _loc, name in modules.items():
mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], [])
if mod_locs:
importer.import_module(mod_locs[0])
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 882517f5..73c31772 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -279,24 +279,28 @@ class SshdConfigLine(object):
def parse_ssh_config(fname):
+ if not os.path.isfile(fname):
+ return []
+ return parse_ssh_config_lines(util.load_file(fname).splitlines())
+
+
+def parse_ssh_config_lines(lines):
# See: man sshd_config
# The file contains keyword-argument pairs, one per line.
# Lines starting with '#' and empty lines are interpreted as comments.
# Note: key-words are case-insensitive and arguments are case-sensitive
- lines = []
- if not os.path.isfile(fname):
- return lines
- for line in util.load_file(fname).splitlines():
+ ret = []
+ for line in lines:
line = line.strip()
if not line or line.startswith("#"):
- lines.append(SshdConfigLine(line))
+ ret.append(SshdConfigLine(line))
continue
try:
key, val = line.split(None, 1)
except ValueError:
key, val = line.split('=', 1)
- lines.append(SshdConfigLine(line, key, val))
- return lines
+ ret.append(SshdConfigLine(line, key, val))
+ return ret
def parse_ssh_config_map(fname):
@@ -310,4 +314,56 @@ def parse_ssh_config_map(fname):
ret[line.key] = line.value
return ret
+
+def update_ssh_config(updates, fname=DEF_SSHD_CFG):
+ """Read fname, and update if changes are necessary.
+
+ @param updates: dictionary of desired values {Option: value}
+ @return: boolean indicating if an update was done."""
+ lines = parse_ssh_config(fname)
+ changed = update_ssh_config_lines(lines=lines, updates=updates)
+ if changed:
+ util.write_file(
+ fname, "\n".join([str(l) for l in lines]) + "\n", copy_mode=True)
+ return len(changed) != 0
+
+
+def update_ssh_config_lines(lines, updates):
+ """Update the ssh config lines per updates.
+
+ @param lines: array of SshdConfigLine. This array is updated in place.
+ @param updates: dictionary of desired values {Option: value}
+ @return: A list of keys in updates that were changed."""
+ found = set()
+ changed = []
+
+ # Keywords are case-insensitive and arguments are case-sensitive
+ casemap = dict([(k.lower(), k) for k in updates.keys()])
+
+ for (i, line) in enumerate(lines, start=1):
+ if not line.key:
+ continue
+ if line.key in casemap:
+ key = casemap[line.key]
+ value = updates[key]
+ found.add(key)
+ if line.value == value:
+ LOG.debug("line %d: option %s already set to %s",
+ i, key, value)
+ else:
+ changed.append(key)
+ LOG.debug("line %d: option %s updated %s -> %s", i,
+ key, line.value, value)
+ line.value = value
+
+ if len(found) != len(updates):
+ for key, value in updates.items():
+ if key in found:
+ continue
+ changed.append(key)
+ lines.append(SshdConfigLine('', key, value))
+ LOG.debug("line %d: option %s added with %s",
+ len(lines), key, value)
+ return changed
+
# vi: ts=4 expandtab
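Typical use of the new helper, per its docstring (hypothetical call site;
requires write access to the target file):

    from cloudinit.ssh_util import update_ssh_config

    # Rewrites sshd_config only when a change is actually needed and
    # returns True if the file was updated.
    changed = update_ssh_config({'PasswordAuthentication': 'no'},
                                fname='/etc/ssh/sshd_config')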
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index bc4ebc85..286607bf 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -362,16 +362,22 @@ class Init(object):
self._store_vendordata()
def setup_datasource(self):
- if self.datasource is None:
- raise RuntimeError("Datasource is None, cannot setup.")
- self.datasource.setup(is_new_instance=self.is_new_instance())
+ with events.ReportEventStack("setup-datasource",
+ "setting up datasource",
+ parent=self.reporter):
+ if self.datasource is None:
+ raise RuntimeError("Datasource is None, cannot setup.")
+ self.datasource.setup(is_new_instance=self.is_new_instance())
def activate_datasource(self):
- if self.datasource is None:
- raise RuntimeError("Datasource is None, cannot activate.")
- self.datasource.activate(cfg=self.cfg,
- is_new_instance=self.is_new_instance())
- self._write_to_cache()
+ with events.ReportEventStack("activate-datasource",
+ "activating datasource",
+ parent=self.reporter):
+ if self.datasource is None:
+ raise RuntimeError("Datasource is None, cannot activate.")
+ self.datasource.activate(cfg=self.cfg,
+ is_new_instance=self.is_new_instance())
+ self._write_to_cache()
def _store_userdata(self):
raw_ud = self.datasource.get_userdata_raw()
@@ -691,7 +697,9 @@ class Modules(object):
module_list = []
if name not in self.cfg:
return module_list
- cfg_mods = self.cfg[name]
+ cfg_mods = self.cfg.get(name)
+ if not cfg_mods:
+ return module_list
# Create 'module_list', an array of hashes
# Where hash['mod'] = module name
# hash['freq'] = frequency
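For context on the wrapping above, a sketch of how events.ReportEventStack is driven; the step name and description here are illustrative only:

from cloudinit.reporting import events

# Entering the context emits a start event; leaving it emits a finish
# event (with a non-success result if an exception escapes the block).
with events.ReportEventStack("example-step", "describing example step",
                             parent=None):
    pass  # the work being reported on would go here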
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index b3ea64e4..7e7acb86 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -121,7 +121,11 @@ def detect_template(text):
def render_from_file(fn, params):
if not params:
params = {}
- template_type, renderer, content = detect_template(util.load_file(fn))
+ # jinja in python2 uses unicode internally; any py2 str it is given
+ # gets decoded. If that str contains non-ascii bytes, the decode
+ # raises UnicodeDecodeError, so we explicitly convert to unicode here.
+ template_type, renderer, content = detect_template(
+ util.load_file(fn, decode=False).decode('utf-8'))
LOG.debug("Rendering content of '%s' using renderer %s", fn, template_type)
return renderer(content, params)
@@ -132,14 +136,18 @@ def render_to_file(fn, outfn, params, mode=0o644):
def render_string_to_file(content, outfn, params, mode=0o644):
+ """Render string (or py2 unicode) to file.
+ Warning: py2 str with non-ascii chars will cause UnicodeDecodeError."""
contents = render_string(content, params)
util.write_file(outfn, contents, mode=mode)
def render_string(content, params):
+ """Render string (or py2 unicode).
+ Warning: py2 str with non-ascii chars will cause UnicodeDecodeError."""
if not params:
params = {}
- template_type, renderer, content = detect_template(content)
+ _template_type, renderer, content = detect_template(content)
return renderer(content, params)
# vi: ts=4 expandtab
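A hedged illustration of the unicode concern described in the comments above; the template uses the default 'basic' renderer's $variable substitution:

from cloudinit import templater

# Safe on py2 and py3: content is text (py2 unicode / py3 str).
print(templater.render_string(u'name: $name\n', {'name': u'caf\xe9'}))
# Passing a py2 byte str containing non-ascii here would instead raise
# UnicodeDecodeError, per the warnings in the docstrings above.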
diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py
index 999b1d7c..5bfe7fa4 100644
--- a/cloudinit/tests/helpers.py
+++ b/cloudinit/tests/helpers.py
@@ -3,11 +3,13 @@
from __future__ import print_function
import functools
+import httpretty
import logging
import os
import shutil
import sys
import tempfile
+import time
import unittest
import mock
@@ -24,6 +26,8 @@ try:
except ImportError:
from ConfigParser import ConfigParser
+from cloudinit.config.schema import (
+ SchemaValidationError, validate_cloudconfig_schema)
from cloudinit import helpers as ch
from cloudinit import util
@@ -108,12 +112,12 @@ class TestCase(unittest2.TestCase):
super(TestCase, self).setUp()
self.reset_global_state()
- def add_patch(self, target, attr, **kwargs):
+ def add_patch(self, target, attr, *args, **kwargs):
"""Patches specified target object and sets it as attr on test
instance; also schedules cleanup."""
if 'autospec' not in kwargs:
kwargs['autospec'] = True
- m = mock.patch(target, **kwargs)
+ m = mock.patch(target, *args, **kwargs)
p = m.start()
self.addCleanup(m.stop)
setattr(self, attr, p)
@@ -190,35 +194,11 @@ class ResourceUsingTestCase(CiTestCase):
super(ResourceUsingTestCase, self).setUp()
self.resource_path = None
- def resourceLocation(self, subname=None):
- if self.resource_path is None:
- paths = [
- os.path.join('tests', 'data'),
- os.path.join('data'),
- os.path.join(os.pardir, 'tests', 'data'),
- os.path.join(os.pardir, 'data'),
- ]
- for p in paths:
- if os.path.isdir(p):
- self.resource_path = p
- break
- self.assertTrue((self.resource_path and
- os.path.isdir(self.resource_path)),
- msg="Unable to locate test resource data path!")
- if not subname:
- return self.resource_path
- return os.path.join(self.resource_path, subname)
-
- def readResource(self, name):
- where = self.resourceLocation(name)
- with open(where, 'r') as fh:
- return fh.read()
-
def getCloudPaths(self, ds=None):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
cp = ch.Paths({'cloud_dir': tmpdir,
- 'templates_dir': self.resourceLocation()},
+ 'templates_dir': resourceLocation()},
ds=ds)
return cp
@@ -234,7 +214,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
ResourceUsingTestCase.tearDown(self)
def replicateTestRoot(self, example_root, target_root):
- real_root = self.resourceLocation()
+ real_root = resourceLocation()
real_root = os.path.join(real_root, 'roots', example_root)
for (dir_path, _dirnames, filenames) in os.walk(real_root):
real_path = dir_path
@@ -285,7 +265,8 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
os.path: [('isfile', 1), ('exists', 1),
('islink', 1), ('isdir', 1), ('lexists', 1)],
os: [('listdir', 1), ('mkdir', 1),
- ('lstat', 1), ('symlink', 2)]
+ ('lstat', 1), ('symlink', 2),
+ ('stat', 1)]
}
if hasattr(os, 'scandir'):
@@ -323,19 +304,43 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
class HttprettyTestCase(CiTestCase):
# necessary as http_proxy gets in the way of httpretty
# https://github.com/gabrielfalcao/HTTPretty/issues/122
+ # Also make sure that allow_net_connect is set to False, and that
+ # reset and enable/disable are called around each test.
def setUp(self):
self.restore_proxy = os.environ.get('http_proxy')
if self.restore_proxy is not None:
del os.environ['http_proxy']
super(HttprettyTestCase, self).setUp()
+ httpretty.HTTPretty.allow_net_connect = False
+ httpretty.reset()
+ httpretty.enable()
def tearDown(self):
+ httpretty.disable()
+ httpretty.reset()
if self.restore_proxy:
os.environ['http_proxy'] = self.restore_proxy
super(HttprettyTestCase, self).tearDown()
+class SchemaTestCaseMixin(unittest2.TestCase):
+
+ def assertSchemaValid(self, cfg, msg="Valid Schema failed validation."):
+ """Assert the config is valid per self.schema.
+
+ If there is only one top level key in the schema properties, then
+ the cfg will be put under that key."""
+ props = list(self.schema.get('properties'))
+ # put cfg under top level key if there is only one in the schema
+ if len(props) == 1:
+ cfg = {props[0]: cfg}
+ try:
+ validate_cloudconfig_schema(cfg, self.schema, strict=True)
+ except SchemaValidationError:
+ self.fail(msg)
+
+
def populate_dir(path, files):
if not os.path.exists(path):
os.makedirs(path)
@@ -354,11 +359,20 @@ def populate_dir(path, files):
return ret
+def populate_dir_with_ts(path, data):
+ """data is {'file': ('contents', mtime)}. mtime relative to now."""
+ populate_dir(path, dict((k, v[0]) for k, v in data.items()))
+ btime = time.time()
+ for fpath, (_contents, mtime) in data.items():
+ ts = btime + mtime if mtime else btime
+ os.utime(os.path.sep.join((path, fpath)), (ts, ts))
+
+
def dir2dict(startdir, prefix=None):
flist = {}
if prefix is None:
prefix = startdir
- for root, dirs, files in os.walk(startdir):
+ for root, _dirs, files in os.walk(startdir):
for fname in files:
fpath = os.path.join(root, fname)
key = fpath[len(prefix):]
@@ -399,6 +413,18 @@ def wrap_and_call(prefix, mocks, func, *args, **kwargs):
p.stop()
+def resourceLocation(subname=None):
+ path = os.path.join('tests', 'data')
+ if not subname:
+ return path
+ return os.path.join(path, subname)
+
+
+def readResource(name, mode='r'):
+ with open(resourceLocation(name), mode) as fh:
+ return fh.read()
+
+
try:
skipIf = unittest.skipIf
except AttributeError:
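A small sketch of the new populate_dir_with_ts helper; offsets are seconds relative to now, and a falsy offset leaves the mtime at "now":

from cloudinit.tests.helpers import populate_dir_with_ts

populate_dir_with_ts('/tmp/example-dir', {
    'fresh.txt': ('current contents', 0),       # falsy: mtime stays now
    'stale.txt': ('day-old contents', -86400),  # shifted one day back
})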
diff --git a/cloudinit/tests/test_netinfo.py b/cloudinit/tests/test_netinfo.py
index 7dea2e41..d76e768e 100644
--- a/cloudinit/tests/test_netinfo.py
+++ b/cloudinit/tests/test_netinfo.py
@@ -2,105 +2,166 @@
"""Tests netinfo module functions and classes."""
-from cloudinit.netinfo import netdev_pformat, route_pformat
-from cloudinit.tests.helpers import CiTestCase, mock
+from copy import copy
+
+from cloudinit.netinfo import netdev_info, netdev_pformat, route_pformat
+from cloudinit.tests.helpers import CiTestCase, mock, readResource
# Example ifconfig and route output
-SAMPLE_IFCONFIG_OUT = """\
-enp0s25 Link encap:Ethernet HWaddr 50:7b:9d:2c:af:91
- inet addr:192.168.2.18 Bcast:192.168.2.255 Mask:255.255.255.0
- inet6 addr: fe80::8107:2b92:867e:f8a6/64 Scope:Link
- UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
- RX packets:8106427 errors:55 dropped:0 overruns:0 frame:37
- TX packets:9339739 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:1000
- RX bytes:4953721719 (4.9 GB) TX bytes:7731890194 (7.7 GB)
- Interrupt:20 Memory:e1200000-e1220000
-
-lo Link encap:Local Loopback
- inet addr:127.0.0.1 Mask:255.0.0.0
- inet6 addr: ::1/128 Scope:Host
- UP LOOPBACK RUNNING MTU:65536 Metric:1
- RX packets:579230851 errors:0 dropped:0 overruns:0 frame:0
- TX packets:579230851 errors:0 dropped:0 overruns:0 carrier:0
- collisions:0 txqueuelen:1
-"""
-
-SAMPLE_ROUTE_OUT = '\n'.join([
- '0.0.0.0 192.168.2.1 0.0.0.0 UG 0 0 0'
- ' enp0s25',
- '0.0.0.0 192.168.2.1 0.0.0.0 UG 0 0 0'
- ' wlp3s0',
- '192.168.2.0 0.0.0.0 255.255.255.0 U 0 0 0'
- ' enp0s25'])
-
-
-NETDEV_FORMATTED_OUT = '\n'.join([
- '+++++++++++++++++++++++++++++++++++++++Net device info+++++++++++++++++++'
- '++++++++++++++++++++',
- '+---------+------+------------------------------+---------------+-------+'
- '-------------------+',
- '| Device | Up | Address | Mask | Scope |'
- ' Hw-Address |',
- '+---------+------+------------------------------+---------------+-------+'
- '-------------------+',
- '| enp0s25 | True | 192.168.2.18 | 255.255.255.0 | . |'
- ' 50:7b:9d:2c:af:91 |',
- '| enp0s25 | True | fe80::8107:2b92:867e:f8a6/64 | . | link |'
- ' 50:7b:9d:2c:af:91 |',
- '| lo | True | 127.0.0.1 | 255.0.0.0 | . |'
- ' . |',
- '| lo | True | ::1/128 | . | host |'
- ' . |',
- '+---------+------+------------------------------+---------------+-------+'
- '-------------------+'])
-
-ROUTE_FORMATTED_OUT = '\n'.join([
- '+++++++++++++++++++++++++++++Route IPv4 info++++++++++++++++++++++++++'
- '+++',
- '+-------+-------------+-------------+---------------+-----------+-----'
- '--+',
- '| Route | Destination | Gateway | Genmask | Interface | Flags'
- ' |',
- '+-------+-------------+-------------+---------------+-----------+'
- '-------+',
- '| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | wlp3s0 |'
- ' UG |',
- '| 1 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | enp0s25 |'
- ' U |',
- '+-------+-------------+-------------+---------------+-----------+'
- '-------+',
- '++++++++++++++++++++++++++++++++++++++++Route IPv6 info++++++++++'
- '++++++++++++++++++++++++++++++',
- '+-------+-------------+-------------+---------------+---------------+'
- '-----------------+-------+',
- '| Route | Proto | Recv-Q | Send-Q | Local Address |'
- ' Foreign Address | State |',
- '+-------+-------------+-------------+---------------+---------------+'
- '-----------------+-------+',
- '| 0 | 0.0.0.0 | 192.168.2.1 | 0.0.0.0 | UG |'
- ' 0 | 0 |',
- '| 1 | 192.168.2.0 | 0.0.0.0 | 255.255.255.0 | U |'
- ' 0 | 0 |',
- '+-------+-------------+-------------+---------------+---------------+'
- '-----------------+-------+'])
+SAMPLE_OLD_IFCONFIG_OUT = readResource("netinfo/old-ifconfig-output")
+SAMPLE_NEW_IFCONFIG_OUT = readResource("netinfo/new-ifconfig-output")
+SAMPLE_IPADDRSHOW_OUT = readResource("netinfo/sample-ipaddrshow-output")
+SAMPLE_ROUTE_OUT_V4 = readResource("netinfo/sample-route-output-v4")
+SAMPLE_ROUTE_OUT_V6 = readResource("netinfo/sample-route-output-v6")
+SAMPLE_IPROUTE_OUT_V4 = readResource("netinfo/sample-iproute-output-v4")
+SAMPLE_IPROUTE_OUT_V6 = readResource("netinfo/sample-iproute-output-v6")
+NETDEV_FORMATTED_OUT = readResource("netinfo/netdev-formatted-output")
+ROUTE_FORMATTED_OUT = readResource("netinfo/route-formatted-output")
class TestNetInfo(CiTestCase):
maxDiff = None
+ with_logs = True
+ @mock.patch('cloudinit.netinfo.util.which')
@mock.patch('cloudinit.netinfo.util.subp')
- def test_netdev_pformat(self, m_subp):
- """netdev_pformat properly rendering network device information."""
- m_subp.return_value = (SAMPLE_IFCONFIG_OUT, '')
+ def test_netdev_old_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering old nettools info."""
+ m_subp.return_value = (SAMPLE_OLD_IFCONFIG_OUT, '')
+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
content = netdev_pformat()
self.assertEqual(NETDEV_FORMATTED_OUT, content)
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_new_nettools_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering netdev new nettools info."""
+ m_subp.return_value = (SAMPLE_NEW_IFCONFIG_OUT, '')
+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+ content = netdev_pformat()
+ self.assertEqual(NETDEV_FORMATTED_OUT, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_iproute_pformat(self, m_subp, m_which):
+ """netdev_pformat properly rendering ip route info."""
+ m_subp.return_value = (SAMPLE_IPADDRSHOW_OUT, '')
+ m_which.side_effect = lambda x: x if x == 'ip' else None
+ content = netdev_pformat()
+ new_output = copy(NETDEV_FORMATTED_OUT)
+ # ip route show describes global scopes on ipv4 addresses
+ # whereas ifconfig does not. Add proper global/host scope to output.
+ new_output = new_output.replace('| . | 50:7b', '| global | 50:7b')
+ new_output = new_output.replace(
+ '255.0.0.0 | . |', '255.0.0.0 | host |')
+ self.assertEqual(new_output, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_warn_on_missing_commands(self, m_subp, m_which):
+ """netdev_pformat warns when missing both ip and 'netstat'."""
+ m_which.return_value = None # Niether ip nor netstat found
+ content = netdev_pformat()
+ self.assertEqual('\n', content)
+ self.assertEqual(
+ "WARNING: Could not print networks: missing 'ip' and 'ifconfig'"
+ " commands\n",
+ self.logs.getvalue())
+ m_subp.assert_not_called()
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_info_nettools_down(self, m_subp, m_which):
+ """test netdev_info using nettools and down interfaces."""
+ m_subp.return_value = (
+ readResource("netinfo/new-ifconfig-output-down"), "")
+ m_which.side_effect = lambda x: x if x == 'ifconfig' else None
+ self.assertEqual(
+ {'eth0': {'ipv4': [], 'ipv6': [],
+ 'hwaddr': '00:16:3e:de:51:a6', 'up': False},
+ 'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0'}],
+ 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+ 'hwaddr': '.', 'up': True}},
+ netdev_info("."))
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_netdev_info_iproute_down(self, m_subp, m_which):
+ """Test netdev_info with ip and down interfaces."""
+ m_subp.return_value = (
+ readResource("netinfo/sample-ipaddrshow-output-down"), "")
+ m_which.side_effect = lambda x: x if x == 'ip' else None
+ self.assertEqual(
+ {'lo': {'ipv4': [{'ip': '127.0.0.1', 'bcast': '.',
+ 'mask': '255.0.0.0', 'scope': 'host'}],
+ 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+ 'hwaddr': '.', 'up': True},
+ 'eth0': {'ipv4': [], 'ipv6': [],
+ 'hwaddr': '00:16:3e:de:51:a6', 'up': False}},
+ netdev_info("."))
+
+ @mock.patch('cloudinit.netinfo.netdev_info')
+ def test_netdev_pformat_with_down(self, m_netdev_info):
+ """test netdev_pformat when netdev_info returns 'down' interfaces."""
+ m_netdev_info.return_value = (
+ {'lo': {'ipv4': [{'ip': '127.0.0.1', 'mask': '255.0.0.0',
+ 'scope': 'host'}],
+ 'ipv6': [{'ip': '::1/128', 'scope6': 'host'}],
+ 'hwaddr': '.', 'up': True},
+ 'eth0': {'ipv4': [], 'ipv6': [],
+ 'hwaddr': '00:16:3e:de:51:a6', 'up': False}})
+ self.assertEqual(
+ readResource("netinfo/netdev-formatted-output-down"),
+ netdev_pformat())
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_route_nettools_pformat(self, m_subp, m_which):
+ """route_pformat properly rendering nettools route info."""
+
+ def subp_netstat_route_selector(*args, **kwargs):
+ if args[0] == ['netstat', '--route', '--numeric', '--extend']:
+ return (SAMPLE_ROUTE_OUT_V4, '')
+ if args[0] == ['netstat', '-A', 'inet6', '--route', '--numeric']:
+ return (SAMPLE_ROUTE_OUT_V6, '')
+ raise Exception('Unexpected subp call %s' % args[0])
+
+ m_subp.side_effect = subp_netstat_route_selector
+ m_which.side_effect = lambda x: x if x == 'netstat' else None
+ content = route_pformat()
+ self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
@mock.patch('cloudinit.netinfo.util.subp')
- def test_route_pformat(self, m_subp):
- """netdev_pformat properly rendering network device information."""
- m_subp.return_value = (SAMPLE_ROUTE_OUT, '')
+ def test_route_iproute_pformat(self, m_subp, m_which):
+ """route_pformat properly rendering ip route info."""
+
+ def subp_iproute_selector(*args, **kwargs):
+ if ['ip', '-o', 'route', 'list'] == args[0]:
+ return (SAMPLE_IPROUTE_OUT_V4, '')
+ v6cmd = ['ip', '--oneline', '-6', 'route', 'list', 'table', 'all']
+ if v6cmd == args[0]:
+ return (SAMPLE_IPROUTE_OUT_V6, '')
+ raise Exception('Unexpected subp call %s' % args[0])
+
+ m_subp.side_effect = subp_iproute_selector
+ m_which.side_effect = lambda x: x if x == 'ip' else None
content = route_pformat()
self.assertEqual(ROUTE_FORMATTED_OUT, content)
+
+ @mock.patch('cloudinit.netinfo.util.which')
+ @mock.patch('cloudinit.netinfo.util.subp')
+ def test_route_warn_on_missing_commands(self, m_subp, m_which):
+ """route_pformat warns when missing both ip and 'netstat'."""
+ m_which.return_value = None # Neither ip nor netstat found
+ content = route_pformat()
+ self.assertEqual('\n', content)
+ self.assertEqual(
+ "WARNING: Could not print routes: missing 'ip' and 'netstat'"
+ " commands\n",
+ self.logs.getvalue())
+ m_subp.assert_not_called()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py
index b778a3a7..113249d9 100644
--- a/cloudinit/tests/test_url_helper.py
+++ b/cloudinit/tests/test_url_helper.py
@@ -1,7 +1,10 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit.url_helper import oauth_headers
+from cloudinit.url_helper import oauth_headers, read_file_or_url
from cloudinit.tests.helpers import CiTestCase, mock, skipIf
+from cloudinit import util
+
+import httpretty
try:
@@ -38,3 +41,26 @@ class TestOAuthHeaders(CiTestCase):
'url', 'consumer_key', 'token_key', 'token_secret',
'consumer_secret')
self.assertEqual('url', return_value)
+
+
+class TestReadFileOrUrl(CiTestCase):
+ def test_read_file_or_url_str_from_file(self):
+ """Test that str(result.contents) on file is text version of contents.
+ It should not be "b'data'", but just "'data'" """
+ tmpf = self.tmp_path("myfile1")
+ data = b'This is my file content\n'
+ util.write_file(tmpf, data, omode="wb")
+ result = read_file_or_url("file://%s" % tmpf)
+ self.assertEqual(result.contents, data)
+ self.assertEqual(str(result), data.decode('utf-8'))
+
+ @httpretty.activate
+ def test_read_file_or_url_str_from_url(self):
+ """Test that str(result.contents) on url is text version of contents.
+ It should not be "b'data'", but just "'data'" """
+ url = 'http://hostname/path'
+ data = b'This is my url content\n'
+ httpretty.register_uri(httpretty.GET, url, data)
+ result = read_file_or_url(url)
+ self.assertEqual(result.contents, data)
+ self.assertEqual(str(result), data.decode('utf-8'))
diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py
index 3f37dbb6..17853fc7 100644
--- a/cloudinit/tests/test_util.py
+++ b/cloudinit/tests/test_util.py
@@ -3,11 +3,12 @@
"""Tests for cloudinit.util"""
import logging
-from textwrap import dedent
+import platform
import cloudinit.util as util
from cloudinit.tests.helpers import CiTestCase, mock
+from textwrap import dedent
LOG = logging.getLogger(__name__)
@@ -16,6 +17,29 @@ MOUNT_INFO = [
'153 68 254:0 / /home rw,relatime shared:101 - xfs /dev/sda2 rw,attr2'
]
+OS_RELEASE_SLES = dedent("""\
+ NAME="SLES"\n
+ VERSION="12-SP3"\n
+ VERSION_ID="12.3"\n
+ PRETTY_NAME="SUSE Linux Enterprise Server 12 SP3"\n
+ ID="sles"\nANSI_COLOR="0;32"\n
+ CPE_NAME="cpe:/o:suse:sles:12:sp3"\n
+""")
+
+OS_RELEASE_UBUNTU = dedent("""\
+ NAME="Ubuntu"\n
+ VERSION="16.04.3 LTS (Xenial Xerus)"\n
+ ID=ubuntu\n
+ ID_LIKE=debian\n
+ PRETTY_NAME="Ubuntu 16.04.3 LTS"\n
+ VERSION_ID="16.04"\n
+ HOME_URL="http://www.ubuntu.com/"\n
+ SUPPORT_URL="http://help.ubuntu.com/"\n
+ BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"\n
+ VERSION_CODENAME=xenial\n
+ UBUNTU_CODENAME=xenial\n
+""")
+
class FakeCloud(object):
@@ -135,7 +159,7 @@ class TestGetHostnameFqdn(CiTestCase):
def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self):
"""Calls to cloud.get_hostname pass the metadata_only parameter."""
mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com')
- hostname, fqdn = util.get_hostname_fqdn(
+ _hn, _fqdn = util.get_hostname_fqdn(
cfg={}, cloud=mycloud, metadata_only=True)
self.assertEqual(
[{'fqdn': True, 'metadata_only': True},
@@ -212,4 +236,105 @@ class TestBlkid(CiTestCase):
capture=True, decode="replace")
+@mock.patch('cloudinit.util.subp')
+class TestUdevadmSettle(CiTestCase):
+ def test_with_no_params(self, m_subp):
+ """called with no parameters."""
+ util.udevadm_settle()
+ m_subp.called_once_with(mock.call(['udevadm', 'settle']))
+
+ def test_with_exists_and_not_exists(self, m_subp):
+ """with exists=file where file does not exist should invoke subp."""
+ mydev = self.tmp_path("mydev")
+ util.udevadm_settle(exists=mydev)
+ m_subp.assert_called_once_with(
+ ['udevadm', 'settle', '--exit-if-exists=%s' % mydev])
+
+ def test_with_exists_and_file_exists(self, m_subp):
+ """with exists=file where file does exist should not invoke subp."""
+ mydev = self.tmp_path("mydev")
+ util.write_file(mydev, "foo\n")
+ util.udevadm_settle(exists=mydev)
+ self.assertIsNone(m_subp.call_args)
+
+ def test_with_timeout_int(self, m_subp):
+ """timeout can be an integer."""
+ timeout = 9
+ util.udevadm_settle(timeout=timeout)
+ m_subp.assert_called_once_with(
+ ['udevadm', 'settle', '--timeout=%s' % timeout])
+
+ def test_with_timeout_string(self, m_subp):
+ """timeout can be a string."""
+ timeout = "555"
+ util.udevadm_settle(timeout=timeout)
+ m_subp.assert_called_once_with(
+ ['udevadm', 'settle', '--timeout=%s' % timeout])
+
+ def test_with_exists_and_timeout(self, m_subp):
+ """test call with both exists and timeout."""
+ mydev = self.tmp_path("mydev")
+ timeout = "3"
+ util.udevadm_settle(exists=mydev)
+ m_subp.called_once_with(
+ ['udevadm', 'settle', '--exit-if-exists=%s' % mydev,
+ '--timeout=%s' % timeout])
+
+ def test_subp_exception_raises_to_caller(self, m_subp):
+ m_subp.side_effect = util.ProcessExecutionError("BOOM")
+ self.assertRaises(util.ProcessExecutionError, util.udevadm_settle)
+
+
+@mock.patch('os.path.exists')
+class TestGetLinuxDistro(CiTestCase):
+
+ @classmethod
+ def os_release_exists(cls, path):
+ """Side effect for os.path.exists: only /etc/os-release exists."""
+ if path == '/etc/os-release':
+ return 1
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_distro_quoted_name(self, m_os_release, m_path_exists):
+ """Verify we get the correct name if the os-release file has
+ the distro name in quotes"""
+ m_os_release.return_value = OS_RELEASE_SLES
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('sles', '12.3', platform.machine()), dist)
+
+ @mock.patch('cloudinit.util.load_file')
+ def test_get_linux_distro_bare_name(self, m_os_release, m_path_exists):
+ """Verify we get the correct name if the os-release file does not
+ have the distro name in quotes"""
+ m_os_release.return_value = OS_RELEASE_UBUNTU
+ m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists
+ dist = util.get_linux_distro()
+ self.assertEqual(('ubuntu', '16.04', platform.machine()), dist)
+
+ @mock.patch('platform.dist')
+ def test_get_linux_distro_no_data(self, m_platform_dist, m_path_exists):
+ """Verify we get no information if os-release does not exist"""
+ m_platform_dist.return_value = ('', '', '')
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(('', '', ''), dist)
+
+ @mock.patch('platform.dist')
+ def test_get_linux_distro_no_impl(self, m_platform_dist, m_path_exists):
+ """Verify we get an empty tuple when no information exists and
+ Exceptions are not propagated"""
+ m_platform_dist.side_effect = Exception()
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(('', '', ''), dist)
+
+ @mock.patch('platform.dist')
+ def test_get_linux_distro_plat_data(self, m_platform_dist, m_path_exists):
+ """Verify we get the correct platform information"""
+ m_platform_dist.return_value = ('foo', '1.1', 'aarch64')
+ m_path_exists.return_value = 0
+ dist = util.get_linux_distro()
+ self.assertEqual(('foo', '1.1', 'aarch64'), dist)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/tests/test_version.py b/cloudinit/tests/test_version.py
new file mode 100644
index 00000000..a96c2a47
--- /dev/null
+++ b/cloudinit/tests/test_version.py
@@ -0,0 +1,31 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+
+from cloudinit.tests.helpers import CiTestCase
+from cloudinit import version
+
+import mock
+
+
+class TestExportsFeatures(CiTestCase):
+ def test_has_network_config_v1(self):
+ self.assertIn('NETWORK_CONFIG_V1', version.FEATURES)
+
+ def test_has_network_config_v2(self):
+ self.assertIn('NETWORK_CONFIG_V2', version.FEATURES)
+
+
+class TestVersionString(CiTestCase):
+ @mock.patch("cloudinit.version._PACKAGED_VERSION",
+ "17.2-3-gb05b9972-0ubuntu1")
+ def test_package_version_respected(self):
+ """If _PACKAGED_VERSION is filled in, then it should be returned."""
+ self.assertEqual("17.2-3-gb05b9972-0ubuntu1", version.version_string())
+
+ @mock.patch("cloudinit.version._PACKAGED_VERSION", "@@PACKAGED_VERSION@@")
+ @mock.patch("cloudinit.version.__VERSION__", "17.2")
+ def test_package_version_skipped(self):
+ """If _PACKAGED_VERSION is not modified, then return __VERSION__."""
+ self.assertEqual("17.2", version.version_string())
+
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 03a573af..8067979e 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -15,6 +15,7 @@ import six
import time
from email.utils import parsedate
+from errno import ENOENT
from functools import partial
from itertools import count
from requests import exceptions
@@ -80,6 +81,32 @@ def combine_url(base, *add_ons):
return url
+def read_file_or_url(url, timeout=5, retries=10,
+ headers=None, data=None, sec_between=1, ssl_details=None,
+ headers_cb=None, exception_cb=None):
+ url = url.lstrip()
+ if url.startswith("/"):
+ url = "file://%s" % url
+ if url.lower().startswith("file://"):
+ if data:
+ LOG.warning("Unable to post data to file resource %s", url)
+ file_path = url[len("file://"):]
+ try:
+ with open(file_path, "rb") as fp:
+ contents = fp.read()
+ except IOError as e:
+ code = e.errno
+ if e.errno == ENOENT:
+ code = NOT_FOUND
+ raise UrlError(cause=e, code=code, headers=None, url=url)
+ return FileResponse(file_path, contents=contents)
+ else:
+ return readurl(url, timeout=timeout, retries=retries, headers=headers,
+ headers_cb=headers_cb, data=data,
+ sec_between=sec_between, ssl_details=ssl_details,
+ exception_cb=exception_cb)
+
+
# Made to have same accessors as UrlResponse so that the
# read_file_or_url can return this or that object and the
# 'user' of those objects will not need to know the difference.
@@ -96,7 +123,7 @@ class StringResponse(object):
return True
def __str__(self):
- return self.contents
+ return self.contents.decode('utf-8')
class FileResponse(StringResponse):
@@ -519,7 +546,7 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
resource_owner_secret=token_secret,
signature_method=oauth1.SIGNATURE_PLAINTEXT,
timestamp=timestamp)
- uri, signed_headers, body = client.sign(url)
+ _uri, signed_headers, _body = client.sign(url)
return signed_headers
# vi: ts=4 expandtab
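A sketch of read_file_or_url at its new home: a bare path is coerced to a file:// URL, a missing file surfaces as a UrlError with code NOT_FOUND, and anything else is delegated to readurl:

from cloudinit.url_helper import UrlError, read_file_or_url

try:
    resp = read_file_or_url('/etc/hostname')  # becomes file:///etc/hostname
    print(resp.contents)       # raw bytes
    print(str(resp))           # utf-8 text, per the __str__ change above
except UrlError as e:
    print('read failed with code %s' % e.code)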
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index cc55daf8..ed83d2d8 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -19,7 +19,7 @@ import six
from cloudinit import handlers
from cloudinit import log as logging
-from cloudinit.url_helper import UrlError
+from cloudinit.url_helper import read_file_or_url, UrlError
from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -224,8 +224,8 @@ class UserDataProcessor(object):
content = util.load_file(include_once_fn)
else:
try:
- resp = util.read_file_or_url(include_url,
- ssl_details=self.ssl_details)
+ resp = read_file_or_url(include_url,
+ ssl_details=self.ssl_details)
if include_once_on and resp.ok():
util.write_file(include_once_fn, resp.contents,
mode=0o600)
@@ -337,8 +337,10 @@ def is_skippable(part):
# Converts a raw string into a mime message
def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
+ """convert a string (more likely bytes) or a message into
+ a mime message."""
if not raw_data:
- raw_data = ''
+ raw_data = b''
def create_binmsg(data, content_type):
maintype, subtype = content_type.split("/", 1)
@@ -346,15 +348,17 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
msg.set_payload(data)
return msg
- try:
- data = util.decode_binary(util.decomp_gzip(raw_data))
- if "mime-version:" in data[0:4096].lower():
- msg = util.message_from_string(data)
- else:
- msg = create_binmsg(data, content_type)
- except UnicodeDecodeError:
- msg = create_binmsg(raw_data, content_type)
+ if isinstance(raw_data, six.text_type):
+ bdata = raw_data.encode('utf-8')
+ else:
+ bdata = raw_data
+ bdata = util.decomp_gzip(bdata, decode=False)
+ if b"mime-version:" in bdata[0:4096].lower():
+ msg = util.message_from_string(bdata.decode('utf-8'))
+ else:
+ msg = create_binmsg(bdata, content_type)
return msg
+
# vi: ts=4 expandtab
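A hedged example of the reworked convert_string: text input is encoded to utf-8, gzip is transparently decompressed, and a MIME header within the first 4096 bytes triggers full message parsing:

from cloudinit.user_data import convert_string

msg = convert_string(b'#!/bin/sh\necho hello\n')
# Non-MIME input lands in a single-part message of the module's default
# NOT_MULTIPART_TYPE content type.
print(msg.get_content_type())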
diff --git a/cloudinit/util.py b/cloudinit/util.py
index acdc0d85..6da95113 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -576,6 +576,39 @@ def get_cfg_option_int(yobj, key, default=0):
return int(get_cfg_option_str(yobj, key, default=default))
+def get_linux_distro():
+ distro_name = ''
+ distro_version = ''
+ if os.path.exists('/etc/os-release'):
+ os_release = load_file('/etc/os-release')
+ for line in os_release.splitlines():
+ if line.strip().startswith('ID='):
+ distro_name = line.split('=')[-1]
+ distro_name = distro_name.replace('"', '')
+ if line.strip().startswith('VERSION_ID='):
+ # Let's hope for the best that distros stay consistent ;)
+ distro_version = line.split('=')[-1]
+ distro_version = distro_version.replace('"', '')
+ else:
+ dist = ('', '', '')
+ try:
+ # Will be removed in 3.7
+ dist = platform.dist() # pylint: disable=W1505
+ except Exception:
+ pass
+ finally:
+ found = None
+ for entry in dist:
+ if entry:
+ found = 1
+ if not found:
+ LOG.warning('Unable to determine distribution, template '
+ 'expansion may have unexpected results')
+ return dist
+
+ return (distro_name, distro_version, platform.machine())
+
+
def system_info():
info = {
'platform': platform.platform(),
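What the new get_linux_distro returns, sketched against the samples used in the tests above (assuming an os-release with ID="sles" and VERSION_ID="12.3"):

from cloudinit import util

# With /etc/os-release present: ('sles', '12.3', platform.machine()).
# Without it, platform.dist() supplies the (deprecated) fallback tuple.
print(util.get_linux_distro())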
@@ -583,19 +616,19 @@ def system_info():
'release': platform.release(),
'python': platform.python_version(),
'uname': platform.uname(),
- 'dist': platform.dist(), # pylint: disable=W1505
+ 'dist': get_linux_distro()
}
system = info['system'].lower()
var = 'unknown'
if system == "linux":
linux_dist = info['dist'][0].lower()
- if linux_dist in ('centos', 'fedora', 'debian'):
+ if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'):
var = linux_dist
elif linux_dist in ('ubuntu', 'linuxmint', 'mint'):
var = 'ubuntu'
elif linux_dist == 'redhat':
var = 'rhel'
- elif linux_dist == 'suse':
+ elif linux_dist in ('opensuse', 'sles'):
var = 'suse'
else:
var = 'linux'
@@ -857,37 +890,6 @@ def fetch_ssl_details(paths=None):
return ssl_details
-def read_file_or_url(url, timeout=5, retries=10,
- headers=None, data=None, sec_between=1, ssl_details=None,
- headers_cb=None, exception_cb=None):
- url = url.lstrip()
- if url.startswith("/"):
- url = "file://%s" % url
- if url.lower().startswith("file://"):
- if data:
- LOG.warning("Unable to post data to file resource %s", url)
- file_path = url[len("file://"):]
- try:
- contents = load_file(file_path, decode=False)
- except IOError as e:
- code = e.errno
- if e.errno == ENOENT:
- code = url_helper.NOT_FOUND
- raise url_helper.UrlError(cause=e, code=code, headers=None,
- url=url)
- return url_helper.FileResponse(file_path, contents=contents)
- else:
- return url_helper.readurl(url,
- timeout=timeout,
- retries=retries,
- headers=headers,
- headers_cb=headers_cb,
- data=data,
- sec_between=sec_between,
- ssl_details=ssl_details,
- exception_cb=exception_cb)
-
-
def load_yaml(blob, default=None, allowed=(dict,)):
loaded = default
blob = decode_binary(blob)
@@ -905,8 +907,20 @@ def load_yaml(blob, default=None, allowed=(dict,)):
" but got %s instead") %
(allowed, type_utils.obj_name(converted)))
loaded = converted
- except (yaml.YAMLError, TypeError, ValueError):
- logexc(LOG, "Failed loading yaml blob")
+ except (yaml.YAMLError, TypeError, ValueError) as e:
+ msg = 'Failed loading yaml blob'
+ mark = None
+ if hasattr(e, 'context_mark') and getattr(e, 'context_mark'):
+ mark = getattr(e, 'context_mark')
+ elif hasattr(e, 'problem_mark') and getattr(e, 'problem_mark'):
+ mark = getattr(e, 'problem_mark')
+ if mark:
+ msg += (
+ '. Invalid format at line {line} column {col}: "{err}"'.format(
+ line=mark.line + 1, col=mark.column + 1, err=e))
+ else:
+ msg += '. {err}'.format(err=e)
+ LOG.warning(msg)
return loaded
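An illustrative failure (hypothetical input) showing the more precise warning load_yaml now emits when pyyaml supplies a problem mark:

from cloudinit import util

util.load_yaml("a: b\n- c\n")  # malformed: mapping followed by sequence
# would log roughly:
#   Failed loading yaml blob. Invalid format at line 2 column 1: "..."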
@@ -925,12 +939,14 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
- md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
+ md_resp = url_helper.read_file_or_url(md_url, timeout, retries,
+ file_retries)
md = None
if md_resp.ok():
md = load_yaml(decode_binary(md_resp.contents), default={})
- ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
+ ud_resp = url_helper.read_file_or_url(ud_url, timeout, retries,
+ file_retries)
ud = None
if ud_resp.ok():
ud = ud_resp.contents
@@ -1154,7 +1170,9 @@ def gethostbyaddr(ip):
def is_resolvable_url(url):
"""determine if this url is resolvable (existing or ip)."""
- return is_resolvable(urlparse.urlparse(url).hostname)
+ return log_time(logfunc=LOG.debug, msg="Resolving URL: " + url,
+ func=is_resolvable,
+ args=(urlparse.urlparse(url).hostname,))
def search_for_mirror(candidates):
@@ -1446,7 +1464,7 @@ def get_config_logfiles(cfg):
for fmt in get_output_cfg(cfg, None):
if not fmt:
continue
- match = re.match('(?P<type>\||>+)\s*(?P<target>.*)', fmt)
+ match = re.match(r'(?P<type>\||>+)\s*(?P<target>.*)', fmt)
if not match:
continue
target = match.group('target')
@@ -1608,7 +1626,8 @@ def mounts():
return mounted
-def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
+def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True,
+ update_env_for_mount=None):
"""
Mount the device, call method 'callback' passing the directory
in which it was mounted, then unmount. Return whatever 'callback'
@@ -1670,7 +1689,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
mountcmd.extend(['-t', mtype])
mountcmd.append(device)
mountcmd.append(tmpd)
- subp(mountcmd)
+ subp(mountcmd, update_env=update_env_for_mount)
umount = tmpd # This forces it to be unmounted (when set)
mountpoint = tmpd
break
@@ -1857,9 +1876,55 @@ def subp_blob_in_tempfile(blob, *args, **kwargs):
return subp(*args, **kwargs)
-def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
+def subp(args, data=None, rcs=None, env=None, capture=True,
+ combine_capture=False, shell=False,
logstring=False, decode="replace", target=None, update_env=None,
status_cb=None):
+ """Run a subprocess.
+
+ :param args: command to run in a list. [cmd, arg1, arg2...]
+ :param data: input to the command, made available on its stdin.
+ :param rcs:
+ a list of allowed return codes. If subprocess exits with a value not
+ in this list, a ProcessExecutionError will be raised. By default,
+ captured output is returned as a string. See the 'decode' parameter.
+ :param env: a dictionary for the command's environment.
+ :param capture:
+ boolean indicating if output should be captured. If True, then stderr
+ and stdout will be returned. If False, they will not be redirected.
+ :param combine_capture:
+ boolean indicating if stderr should be redirected to stdout. When True,
+ interleaved stderr and stdout will be returned as the first element of
+ a tuple, and the second will be an empty string or bytes (per decode).
+ If combine_capture is True, then output is captured independent of
+ the value of capture.
+ :param shell: boolean indicating if this should be run with a shell.
+ :param logstring:
+ the command will be logged to DEBUG. If it contains info that should
+ not be logged, then logstring will be logged instead.
+ :param decode:
+ if False, no decoding will be done and returned stdout and stderr will
+ be bytes. Other allowed values are 'strict', 'ignore', and 'replace'.
+ These values are passed through to bytes().decode() as the 'errors'
+ parameter. There is no support for decoding to anything other than utf-8.
+ :param target:
+ not supported, kwarg present only to make function signature similar
+ to curtin's subp.
+ :param update_env:
+ update the environment for this command with this dictionary.
+ This will not affect the current process's os.environ.
+ :param status_cb:
+ call this function with a single string argument before starting
+ and after finishing.
+
+ :return:
+ if not capturing, return is (None, None)
+ if capturing, stdout and stderr are returned.
+ if decode:
+ entries in tuple will be python2 unicode or python3 string
+ if not decode:
+ entries in tuple will be python2 string or python3 bytes
+ """
# not supported in cloud-init (yet), for now kept in the call signature
# to ease maintaining code shared between cloud-init and curtin
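A sketch of the combine_capture behavior documented above; the shell command is illustrative:

from cloudinit import util

# stderr is folded into stdout; err comes back as an empty string.
out, err = util.subp(
    ['sh', '-c', 'echo out; echo err 1>&2'], combine_capture=True)
assert err == ''
# 'out' now holds the interleaved streams, e.g. 'out\nerr\n'.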
@@ -1885,7 +1950,8 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
status_cb('Begin run command: {command}\n'.format(command=command))
if not logstring:
LOG.debug(("Running command %s with allowed return codes %s"
- " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+ " (shell=%s, capture=%s)"),
+ args, rcs, shell, 'combine' if combine_capture else capture)
else:
LOG.debug(("Running hidden command to protect sensitive "
"input/output logstring: %s"), logstring)
@@ -1896,6 +1962,9 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
if capture:
stdout = subprocess.PIPE
stderr = subprocess.PIPE
+ if combine_capture:
+ stdout = subprocess.PIPE
+ stderr = subprocess.STDOUT
if data is None:
# using devnull assures any reads get null, rather
# than possibly waiting on input.
@@ -1934,10 +2003,11 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
devnull_fp.close()
# Just ensure blank instead of none.
- if not out and capture:
- out = b''
- if not err and capture:
- err = b''
+ if capture or combine_capture:
+ if not out:
+ out = b''
+ if not err:
+ err = b''
if decode:
def ldecode(data, m='utf-8'):
if not isinstance(data, bytes):
@@ -2061,24 +2131,33 @@ def is_container():
return False
-def get_proc_env(pid):
+def get_proc_env(pid, encoding='utf-8', errors='replace'):
"""
Return the environment in a dict that a given process id was started with.
- """
- env = {}
- fn = os.path.join("/proc/", str(pid), "environ")
+ @param encoding: if true, then decoding will be done with
+ .decode(encoding, errors) and text will be returned;
+ if false, then binary will be returned.
+ @param errors: only used if encoding is true."""
+ fn = os.path.join("/proc", str(pid), "environ")
+
try:
- contents = load_file(fn)
- toks = contents.split("\x00")
- for tok in toks:
- if tok == "":
- continue
- (name, val) = tok.split("=", 1)
- if name:
- env[name] = val
+ contents = load_file(fn, decode=False)
except (IOError, OSError):
- pass
+ return {}
+
+ env = {}
+ null, equal = (b"\x00", b"=")
+ if encoding:
+ null, equal = ("\x00", "=")
+ contents = contents.decode(encoding, errors)
+
+ for tok in contents.split(null):
+ if not tok:
+ continue
+ (name, val) = tok.split(equal, 1)
+ if name:
+ env[name] = val
return env
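How the reworked get_proc_env is used; PID 1 is only an example, and a falsy encoding returns bytes keys and values:

from cloudinit import util

env = util.get_proc_env(1)                 # {'KEY': 'value', ...} as text
raw = util.get_proc_env(1, encoding=None)  # keys/values stay bytes
# A missing or unreadable /proc/<pid>/environ yields {}.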
@@ -2214,7 +2293,7 @@ def parse_mtab(path):
def find_freebsd_part(label_part):
if label_part.startswith("/dev/label/"):
target_label = label_part[5:]
- (label_part, err) = subp(['glabel', 'status', '-s'])
+ (label_part, _err) = subp(['glabel', 'status', '-s'])
for labels in label_part.split("\n"):
items = labels.split()
if len(items) > 0 and items[0].startswith(target_label):
@@ -2275,8 +2354,8 @@ def parse_mount(path):
# the regex is a bit complex. to better understand this regex see:
# https://regex101.com/r/2F6c1k/1
# https://regex101.com/r/T2en7a/1
- regex = r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' + \
- '(?=(?:type)[\s]+([\S]+)|\(([^,]*))'
+ regex = (r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) '
+ r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))')
for line in mount_locs:
m = re.search(regex, line)
if not m:
@@ -2545,11 +2624,21 @@ def _call_dmidecode(key, dmidecode_path):
if result.replace(".", "") == "":
return ""
return result
- except (IOError, OSError) as _err:
- LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err)
+ except (IOError, OSError) as e:
+ LOG.debug('failed dmidecode cmd: %s\n%s', cmd, e)
return None
+def is_x86(uname_arch=None):
+ """Return True if platform is x86-based"""
+ if uname_arch is None:
+ uname_arch = os.uname()[4]
+ x86_arch_match = (
+ uname_arch == 'x86_64' or
+ (uname_arch[0] == 'i' and uname_arch[2:] == '86'))
+ return x86_arch_match
+
+
def read_dmi_data(key):
"""
Wrapper for reading DMI data.
@@ -2577,8 +2666,7 @@ def read_dmi_data(key):
# running dmidecode can be problematic on some arches (LP: #1243287)
uname_arch = os.uname()[4]
- if not (uname_arch == "x86_64" or
- (uname_arch.startswith("i") and uname_arch[2:] == "86") or
+ if not (is_x86(uname_arch) or
uname_arch == 'aarch64' or
uname_arch == 'amd64'):
LOG.debug("dmidata is not supported on %s", uname_arch)
@@ -2727,4 +2815,19 @@ def mount_is_read_write(mount_point):
mount_opts = result[-1].split(',')
return mount_opts[0] == 'rw'
+
+def udevadm_settle(exists=None, timeout=None):
+ """Invoke udevadm settle with optional exists and timeout parameters"""
+ settle_cmd = ["udevadm", "settle"]
+ if exists:
+ # skip the settle if the requested path already exists
+ if os.path.exists(exists):
+ return
+ settle_cmd.extend(['--exit-if-exists=%s' % exists])
+ if timeout:
+ settle_cmd.extend(['--timeout=%s' % timeout])
+
+ return subp(settle_cmd)
+
+
# vi: ts=4 expandtab
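Example invocations of the new udevadm_settle wrapper; the device path is hypothetical:

from cloudinit import util

util.udevadm_settle()                    # plain 'udevadm settle'
util.udevadm_settle(exists='/dev/sdb1')  # returns early if the node
                                         # exists, else --exit-if-exists
util.udevadm_settle(timeout=10)          # appends --timeout=10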
diff --git a/cloudinit/version.py b/cloudinit/version.py
index ccd0f84e..3b60fc49 100644
--- a/cloudinit/version.py
+++ b/cloudinit/version.py
@@ -4,7 +4,8 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-__VERSION__ = "18.2"
+__VERSION__ = "18.3"
+_PACKAGED_VERSION = '@@PACKAGED_VERSION@@'
FEATURES = [
# supports network config version 1
@@ -15,6 +16,9 @@ FEATURES = [
def version_string():
+ """Extract a version string from cloud-init."""
+ if not _PACKAGED_VERSION.startswith('@@'):
+ return _PACKAGED_VERSION
return __VERSION__
# vi: ts=4 expandtab
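The sentinel logic above in miniature: package builds substitute @@PACKAGED_VERSION@@ at build time, and an unreplaced sentinel falls back to __VERSION__:

from cloudinit import version

# Prints '18.3' on an unpackaged tree; a package build that filled in
# _PACKAGED_VERSION prints that full packaged string instead.
print(version.version_string())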